From eaac13c785cb560ebaa02f9c2ffdb4ab6ad017dc Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 18 Jun 2024 15:42:37 +0100 Subject: [PATCH 01/44] AwaitsFix for #109871 --- .../xpack/security/authc/oidc/OpenIdConnectAuthIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/x-pack/qa/oidc-op-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthIT.java b/x-pack/qa/oidc-op-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthIT.java index cd37d86626333..e80773d572b03 100644 --- a/x-pack/qa/oidc-op-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthIT.java +++ b/x-pack/qa/oidc-op-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthIT.java @@ -121,6 +121,7 @@ public void testAuthenticateWithCodeFlowAndClientPost() throws Exception { verifyElasticsearchAccessTokenForCodeFlow(tokens.v1()); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/109871") public void testAuthenticateWithCodeFlowAndClientJwtPost() throws Exception { final PrepareAuthResponse prepareAuthResponse = getRedirectedFromFacilitator(REALM_NAME_CLIENT_JWT_AUTH); final String redirectUri = authenticateAtOP(prepareAuthResponse.getAuthUri()); From 5440f178aada864fd954f5773eb071d5db1093b7 Mon Sep 17 00:00:00 2001 From: Oleksandr Kolomiiets Date: Tue, 18 Jun 2024 08:37:27 -0700 Subject: [PATCH 02/44] Support synthetic source for geo_point when ignore_malformed is used (#109651) --- docs/changelog/109651.yaml | 5 ++ .../mapping/types/geo-point.asciidoc | 3 +- .../xcontent/CopyingXContentParser.java | 43 ++++++++++ .../xcontent/XContentBuilder.java | 12 +++ .../mapper/LegacyGeoShapeFieldMapper.java | 7 +- .../mapper/AbstractGeometryFieldMapper.java | 61 +++++++++++--- .../AbstractPointGeometryFieldMapper.java | 17 ++-- .../index/mapper/GeoPointFieldMapper.java | 57 +++++++++++-- .../index/mapper/GeoShapeParser.java | 12 +-- .../mapper/IgnoreMalformedStoredValues.java | 20 +++++ .../index/mapper/XContentDataHelper.java | 7 ++ .../mapper/GeoPointFieldMapperTests.java | 68 +++++++++++----- .../spatial/index/mapper/ShapeParser.java | 12 +-- .../index/mapper/PointFieldMapperTests.java | 2 +- .../test/spatial/140_synthetic_source.yml | 80 +++++++++++++++++++ 15 files changed, 337 insertions(+), 69 deletions(-) create mode 100644 docs/changelog/109651.yaml create mode 100644 libs/x-content/src/main/java/org/elasticsearch/xcontent/CopyingXContentParser.java diff --git a/docs/changelog/109651.yaml b/docs/changelog/109651.yaml new file mode 100644 index 0000000000000..982e6a5b536cc --- /dev/null +++ b/docs/changelog/109651.yaml @@ -0,0 +1,5 @@ +pr: 109651 +summary: Support synthetic source for `geo_point` when `ignore_malformed` is used +area: Mapping +type: enhancement +issues: [] diff --git a/docs/reference/mapping/types/geo-point.asciidoc b/docs/reference/mapping/types/geo-point.asciidoc index aab40efd15acf..6db05188dfb98 100644 --- a/docs/reference/mapping/types/geo-point.asciidoc +++ b/docs/reference/mapping/types/geo-point.asciidoc @@ -220,8 +220,7 @@ any issues, but features in technical preview are not subject to the support SLA of official GA features. `geo_point` fields support <> in their -default configuration. Synthetic `_source` cannot be used together with -<>, <>, or with +default configuration. Synthetic `_source` cannot be used together with <> or with <> disabled. 
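Concretely, the configuration this doc change newly permits is a mapping like the following minimal sketch. It uses the generic XContentBuilder API; the `location` field name and the class name are illustrative, not part of the patch:

    import java.io.IOException;

    import org.elasticsearch.xcontent.XContentBuilder;
    import org.elasticsearch.xcontent.XContentFactory;

    class GeoPointSyntheticSourceMappingSketch {
        // Synthetic _source combined with ignore_malformed on a geo_point: the
        // combination this patch starts supporting (it previously failed mapper
        // validation, see the check removed from GeoPointFieldMapper below).
        static XContentBuilder mapping() throws IOException {
            return XContentFactory.jsonBuilder()
                .startObject()
                .startObject("_source")
                .field("mode", "synthetic")
                .endObject()
                .startObject("properties")
                .startObject("location")
                .field("type", "geo_point")
                .field("ignore_malformed", true)
                .endObject()
                .endObject()
                .endObject();
        }
    }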
Synthetic source always sorts `geo_point` fields (first by latitude and then diff --git a/libs/x-content/src/main/java/org/elasticsearch/xcontent/CopyingXContentParser.java b/libs/x-content/src/main/java/org/elasticsearch/xcontent/CopyingXContentParser.java new file mode 100644 index 0000000000000..b8e6e1330e0c2 --- /dev/null +++ b/libs/x-content/src/main/java/org/elasticsearch/xcontent/CopyingXContentParser.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.xcontent; + +import java.io.IOException; + +/** + * A parser that copies data that was parsed into a {@link XContentBuilder}. + * This parser naturally has some memory and runtime overhead to perform said copying. + * Use with {@link XContentSubParser} to preserve the entire object. + */ +public class CopyingXContentParser extends FilterXContentParserWrapper { + private final XContentBuilder builder; + + public CopyingXContentParser(XContentParser delegate) throws IOException { + super(delegate); + this.builder = XContentBuilder.builder(delegate.contentType().xContent()); + switch (delegate.currentToken()) { + case START_OBJECT -> builder.startObject(); + case START_ARRAY -> builder.startArray(); + default -> throw new IllegalArgumentException( + "can only copy parsers pointed to START_OBJECT or START_ARRAY but found: " + delegate.currentToken() + ); + } + } + + @Override + public Token nextToken() throws IOException { + XContentParser.Token next = delegate().nextToken(); + builder.copyCurrentEvent(delegate()); + return next; + } + + public XContentBuilder getBuilder() { + return builder; + } +} diff --git a/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentBuilder.java b/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentBuilder.java index 2143814565a51..1be4594b097a6 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentBuilder.java +++ b/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentBuilder.java @@ -1220,6 +1220,18 @@ public XContentBuilder rawValue(String value) throws IOException { return this; } + /** + * Copies current event from parser into this builder. + * The difference with {@link XContentBuilder#copyCurrentStructure(XContentParser)} + * is that this method does not copy sub-objects as a single entity. 
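+ * Instead, each event is forwarded one at a time; {@link CopyingXContentParser} relies on
+ * this to mirror every token it consumes into its builder.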
+ * @param parser + * @throws IOException + */ + public XContentBuilder copyCurrentEvent(XContentParser parser) throws IOException { + generator.copyCurrentEvent(parser); + return this; + } + public XContentBuilder copyCurrentStructure(XContentParser parser) throws IOException { generator.copyCurrentStructure(parser); return this; diff --git a/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapper.java b/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapper.java index 4ef2b2e07bb26..4678215dd5b60 100644 --- a/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapper.java +++ b/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapper.java @@ -58,7 +58,6 @@ import java.util.Map; import java.util.Objects; import java.util.Set; -import java.util.function.Consumer; import java.util.function.Function; import java.util.stream.Collectors; @@ -379,18 +378,18 @@ private LegacyGeoShapeParser() {} public void parse( XContentParser parser, CheckedConsumer, IOException> consumer, - Consumer onMalformed + MalformedValueHandler malformedHandler ) throws IOException { try { if (parser.currentToken() == XContentParser.Token.START_ARRAY) { while (parser.nextToken() != XContentParser.Token.END_ARRAY) { - parse(parser, consumer, onMalformed); + parse(parser, consumer, malformedHandler); } } else { consumer.accept(ShapeParser.parse(parser)); } } catch (ElasticsearchParseException e) { - onMalformed.accept(e); + malformedHandler.notify(e); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java index 7cadec68f3e61..831244a3969ef 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java @@ -8,12 +8,14 @@ package org.elasticsearch.index.mapper; import org.apache.lucene.search.Query; +import org.elasticsearch.common.CheckedBiConsumer; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.geo.GeometryFormatterFactory; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.xcontent.DeprecationHandler; import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xcontent.support.MapXContentParser; @@ -52,12 +54,12 @@ public abstract static class Parser { * Parse the given xContent value to one or more objects of type {@link T}. The value can be * in any supported format. 
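+ * Parsing failures are reported to the supplied {@link MalformedValueHandler} rather than
+ * thrown, so callers can ignore them ({@link NoopMalformedValueHandler}) or record the
+ * malformed value for synthetic source ({@link DefaultMalformedValueHandler}).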
*/ - public abstract void parse(XContentParser parser, CheckedConsumer consumer, Consumer onMalformed) + public abstract void parse(XContentParser parser, CheckedConsumer consumer, MalformedValueHandler malformedHandler) throws IOException; private void fetchFromSource(Object sourceMap, Consumer consumer) { try (XContentParser parser = wrapObject(sourceMap)) { - parse(parser, v -> consumer.accept(normalizeFromSource(v)), e -> {}); /* ignore malformed */ + parse(parser, v -> consumer.accept(normalizeFromSource(v)), NoopMalformedValueHandler.INSTANCE); } catch (IOException e) { throw new UncheckedIOException(e); } @@ -84,6 +86,36 @@ private static XContentParser wrapObject(Object sourceMap) throws IOException { } } + public interface MalformedValueHandler { + void notify(Exception parsingException) throws IOException; + + void notify(Exception parsingException, XContentBuilder malformedDataForSyntheticSource) throws IOException; + } + + public record NoopMalformedValueHandler() implements MalformedValueHandler { + public static final NoopMalformedValueHandler INSTANCE = new NoopMalformedValueHandler(); + + @Override + public void notify(Exception parsingException) {} + + @Override + public void notify(Exception parsingException, XContentBuilder malformedDataForSyntheticSource) {} + } + + public record DefaultMalformedValueHandler(CheckedBiConsumer consumer) + implements + MalformedValueHandler { + @Override + public void notify(Exception parsingException) throws IOException { + consumer.accept(parsingException, null); + } + + @Override + public void notify(Exception parsingException, XContentBuilder malformedDataForSyntheticSource) throws IOException { + consumer.accept(parsingException, malformedDataForSyntheticSource); + } + } + public abstract static class AbstractGeometryFieldType extends MappedFieldType { protected final Parser geometryParser; @@ -220,17 +252,20 @@ public final void parse(DocumentParserContext context) throws IOException { new IllegalArgumentException("Cannot index data directly into a field with a [script] parameter") ); } - parser.parse(context.parser(), v -> index(context, v), e -> { - if (ignoreMalformed()) { - context.addIgnoredField(fieldType().name()); - } else { - throw new DocumentParsingException( - context.parser().getTokenLocation(), - "failed to parse field [" + fieldType().name() + "] of type [" + contentType() + "]", - e - ); - } - }); + parser.parse(context.parser(), v -> index(context, v), new DefaultMalformedValueHandler((e, b) -> onMalformedValue(context, b, e))); + } + + protected void onMalformedValue(DocumentParserContext context, XContentBuilder malformedDataForSyntheticSource, Exception cause) + throws IOException { + if (ignoreMalformed()) { + context.addIgnoredField(fieldType().name()); + } else { + throw new DocumentParsingException( + context.parser().getTokenLocation(), + "failed to parse field [" + fieldType().name() + "] of type [" + contentType() + "]", + cause + ); + } } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/AbstractPointGeometryFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/AbstractPointGeometryFieldMapper.java index 9136a0dfbf550..2b4ecc8f0a89d 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/AbstractPointGeometryFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/AbstractPointGeometryFieldMapper.java @@ -18,7 +18,6 @@ import java.io.IOException; import java.util.Map; import java.util.Objects; -import java.util.function.Consumer; import 
java.util.function.Function; import java.util.function.Supplier; @@ -71,7 +70,7 @@ public T getNullValue() { /** A base parser implementation for point formats */ protected abstract static class PointParser extends Parser { protected final String field; - private final CheckedFunction objectParser; + protected final CheckedFunction objectParser; private final T nullValue; private final boolean ignoreZValue; protected final boolean ignoreMalformed; @@ -98,7 +97,7 @@ protected PointParser( protected abstract T createPoint(double x, double y); @Override - public void parse(XContentParser parser, CheckedConsumer consumer, Consumer onMalformed) + public void parse(XContentParser parser, CheckedConsumer consumer, MalformedValueHandler malformedHandler) throws IOException { if (parser.currentToken() == XContentParser.Token.START_ARRAY) { XContentParser.Token token = parser.nextToken(); @@ -132,7 +131,7 @@ public void parse(XContentParser parser, CheckedConsumer consume consumer.accept(nullValue); } } else { - parseAndConsumeFromObject(parser, consumer, onMalformed); + parseAndConsumeFromObject(parser, consumer, malformedHandler); } token = parser.nextToken(); } @@ -142,20 +141,20 @@ public void parse(XContentParser parser, CheckedConsumer consume consumer.accept(nullValue); } } else { - parseAndConsumeFromObject(parser, consumer, onMalformed); + parseAndConsumeFromObject(parser, consumer, malformedHandler); } } - private void parseAndConsumeFromObject( + protected void parseAndConsumeFromObject( XContentParser parser, CheckedConsumer consumer, - Consumer onMalformed - ) { + MalformedValueHandler malformedHandler + ) throws IOException { try { T point = objectParser.apply(parser); consumer.accept(validate(point)); } catch (Exception e) { - onMalformed.accept(e); + malformedHandler.notify(e); } } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java index 296e7df98b0cf..b31a61d50ecdb 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.geo.SimpleVectorTileFormatter; import org.elasticsearch.common.unit.DistanceUnit; +import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.CheckedFunction; import org.elasticsearch.geometry.Point; import org.elasticsearch.index.IndexMode; @@ -45,6 +46,7 @@ import org.elasticsearch.search.lookup.FieldValues; import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.search.runtime.GeoPointScriptFieldDistanceFeatureQuery; +import org.elasticsearch.xcontent.CopyingXContentParser; import org.elasticsearch.xcontent.FilterXContentParserWrapper; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; @@ -193,13 +195,15 @@ private FieldValues scriptValues() { @Override public FieldMapper build(MapperBuilderContext context) { + boolean ignoreMalformedEnabled = ignoreMalformed.get().value(); Parser geoParser = new GeoPointParser( name(), (parser) -> GeoUtils.parseGeoPoint(parser, ignoreZValue.get().value()), nullValue.get(), ignoreZValue.get().value(), - ignoreMalformed.get().value(), - metric.get() != TimeSeriesParams.MetricType.POSITION + ignoreMalformedEnabled, + metric.get() != TimeSeriesParams.MetricType.POSITION, + context.isSourceSynthetic() && 
ignoreMalformedEnabled ); GeoPointFieldType ft = new GeoPointFieldType( context.buildFullName(name()), @@ -524,6 +528,7 @@ public TimeSeriesParams.MetricType getMetricType() { /** GeoPoint parser implementation */ private static class GeoPointParser extends PointParser { + private final boolean storeMalformedDataForSyntheticSource; GeoPointParser( String field, @@ -531,9 +536,11 @@ private static class GeoPointParser extends PointParser { GeoPoint nullValue, boolean ignoreZValue, boolean ignoreMalformed, - boolean allowMultipleValues + boolean allowMultipleValues, + boolean storeMalformedDataForSyntheticSource ) { super(field, objectParser, nullValue, ignoreZValue, ignoreMalformed, allowMultipleValues); + this.storeMalformedDataForSyntheticSource = storeMalformedDataForSyntheticSource; } protected GeoPoint validate(GeoPoint in) { @@ -568,6 +575,45 @@ public GeoPoint normalizeFromSource(GeoPoint point) { // normalize during parsing return point; } + + @Override + protected void parseAndConsumeFromObject( + XContentParser parser, + CheckedConsumer consumer, + MalformedValueHandler malformedHandler + ) throws IOException { + XContentParser parserWithCustomization = parser; + XContentBuilder malformedDataForSyntheticSource = null; + + if (storeMalformedDataForSyntheticSource) { + if (parser.currentToken() == XContentParser.Token.START_OBJECT + || parser.currentToken() == XContentParser.Token.START_ARRAY) { + // We have a complex structure so we'll memorize it while parsing. + var copyingParser = new CopyingXContentParser(parser); + malformedDataForSyntheticSource = copyingParser.getBuilder(); + parserWithCustomization = copyingParser; + } else { + // We have a single value (e.g. a string) that is potentially malformed, let's simply remember it. + malformedDataForSyntheticSource = XContentBuilder.builder(parser.contentType().xContent()).copyCurrentStructure(parser); + } + } + + try { + GeoPoint point = objectParser.apply(parserWithCustomization); + consumer.accept(validate(point)); + } catch (Exception e) { + malformedHandler.notify(e, malformedDataForSyntheticSource); + } + } + } + + @Override + protected void onMalformedValue(DocumentParserContext context, XContentBuilder malformedDataForSyntheticSource, Exception cause) + throws IOException { + super.onMalformedValue(context, malformedDataForSyntheticSource, cause); + if (malformedDataForSyntheticSource != null) { + context.doc().add(IgnoreMalformedStoredValues.storedField(name(), malformedDataForSyntheticSource)); + } } @Override @@ -585,11 +631,6 @@ public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { "field [" + name() + "] of type [" + typeName() + "] doesn't support synthetic source because it doesn't have doc values" ); } - if (ignoreMalformed()) { - throw new IllegalArgumentException( - "field [" + name() + "] of type [" + typeName() + "] doesn't support synthetic source because it ignores malformed points" - ); - } if (copyTo.copyToFields().isEmpty() != true) { throw new IllegalArgumentException( "field [" + name() + "] of type [" + typeName() + "] doesn't support synthetic source because it declares copy_to" diff --git a/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeParser.java b/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeParser.java index d20a700faff81..42f735a58cf51 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeParser.java @@ -18,7 +18,6 @@ import java.io.IOException; import 
java.text.ParseException; -import java.util.function.Consumer; public class GeoShapeParser extends AbstractGeometryFieldMapper.Parser { private final GeometryParser geometryParser; @@ -30,18 +29,21 @@ public GeoShapeParser(GeometryParser geometryParser, Orientation orientation) { } @Override - public void parse(XContentParser parser, CheckedConsumer consumer, Consumer onMalformed) - throws IOException { + public void parse( + XContentParser parser, + CheckedConsumer consumer, + AbstractGeometryFieldMapper.MalformedValueHandler malformedHandler + ) throws IOException { try { if (parser.currentToken() == XContentParser.Token.START_ARRAY) { while (parser.nextToken() != XContentParser.Token.END_ARRAY) { - parse(parser, consumer, onMalformed); + parse(parser, consumer, malformedHandler); } } else { consumer.accept(geometryParser.parse(parser)); } } catch (ParseException | ElasticsearchParseException | IllegalArgumentException e) { - onMalformed.accept(e); + malformedHandler.notify(e); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IgnoreMalformedStoredValues.java b/server/src/main/java/org/elasticsearch/index/mapper/IgnoreMalformedStoredValues.java index 52f4048e9b230..6ad0823738ba0 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IgnoreMalformedStoredValues.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IgnoreMalformedStoredValues.java @@ -25,10 +25,30 @@ * {@code _source}. */ public abstract class IgnoreMalformedStoredValues { + /** + * Creates a stored field that stores malformed data to be used in synthetic source. + * Name of the stored field is original name of the field with added conventional suffix. + * @param name original name of the field + * @param parser parser to grab field content from + * @return + * @throws IOException + */ public static StoredField storedField(String name, XContentParser parser) throws IOException { return XContentDataHelper.storedField(name(name), parser); } + /** + * Creates a stored field that stores malformed data to be used in synthetic source. + * Name of the stored field is original name of the field with added conventional suffix. + * @param name original name of the field + * @param builder malformed data + * @return + * @throws IOException + */ + public static StoredField storedField(String name, XContentBuilder builder) throws IOException { + return XContentDataHelper.storedField(name(name), builder); + } + /** * Build a {@link IgnoreMalformedStoredValues} that never contains any values. */ diff --git a/server/src/main/java/org/elasticsearch/index/mapper/XContentDataHelper.java b/server/src/main/java/org/elasticsearch/index/mapper/XContentDataHelper.java index 6b5b2537e5e1f..254a0bc9c906b 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/XContentDataHelper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/XContentDataHelper.java @@ -45,6 +45,13 @@ static StoredField storedField(String name, XContentParser parser) throws IOExce return (StoredField) processToken(parser, typeUtils -> typeUtils.buildStoredField(name, parser)); } + /** + * Build a {@link StoredField} for the value provided in a {@link XContentBuilder}. + */ + static StoredField storedField(String name, XContentBuilder builder) throws IOException { + return new StoredField(name, TypeUtils.encode(builder)); + } + /** * Build a {@link BytesRef} wrapping a byte array containing an encoded form * the value on which the parser is currently positioned. 
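Putting the new pieces together, the capture flow reduces to roughly the sketch below, condensed from the GeoPointFieldMapper change above. It assumes the malformed value is an object or array (the "complex structure" branch); the class and method names are illustrative:

    import java.io.IOException;

    import org.elasticsearch.common.geo.GeoPoint;
    import org.elasticsearch.common.geo.GeoUtils;
    import org.elasticsearch.xcontent.CopyingXContentParser;
    import org.elasticsearch.xcontent.XContentBuilder;
    import org.elasticsearch.xcontent.XContentParser;

    class MalformedGeoPointCaptureSketch {
        // Wraps the parser so every token consumed is mirrored into a builder.
        // On success there is nothing to keep; on failure the builder holds the
        // malformed input as consumed, ready to become a stored field via
        // IgnoreMalformedStoredValues.storedField(name, builder).
        static XContentBuilder parseOrCapture(XContentParser parser) throws IOException {
            CopyingXContentParser copying = new CopyingXContentParser(parser);
            try {
                GeoPoint point = GeoUtils.parseGeoPoint(copying, true); // true: ignore z value
                // ... hand `point` to the normal indexing path ...
                return null;
            } catch (Exception e) {
                return copying.getBuilder();
            }
        }
    }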
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java index a5d705076561b..a389e803e66b6 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java @@ -17,7 +17,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.geo.GeoJson; import org.elasticsearch.common.geo.GeoPoint; -import org.elasticsearch.core.Tuple; import org.elasticsearch.geo.GeometryTestUtils; import org.elasticsearch.geometry.Geometry; import org.elasticsearch.geometry.Point; @@ -39,6 +38,8 @@ import java.util.Map; import java.util.Objects; import java.util.function.Function; +import java.util.function.Supplier; +import java.util.stream.Stream; import static org.elasticsearch.geometry.utils.Geohash.stringEncode; import static org.elasticsearch.test.ListMatcher.matchesList; @@ -535,6 +536,8 @@ protected List exampleMalformedValues() { ), exampleMalformedValue("-,1.3").errorMatches("latitude must be a number"), exampleMalformedValue("1.3,-").errorMatches("longitude must be a number"), + exampleMalformedValue(b -> b.startObject().field("lat", 1.3).endObject()).errorMatches("Required [lon]"), + exampleMalformedValue(b -> b.startObject().field("lon", 1.3).endObject()).errorMatches("Required [lat]"), exampleMalformedValue(b -> b.startObject().field("lat", "NaN").field("lon", 1.2).endObject()).errorMatches("Required [lat]"), exampleMalformedValue(b -> b.startObject().field("lat", 1.2).field("lon", "NaN").endObject()).errorMatches("Required [lon]"), exampleMalformedValue("NaN,1.3").errorMatches("invalid latitude NaN; must be between -90.0 and 90.0"), @@ -603,7 +606,6 @@ protected SyntheticSourceSupport syntheticSourceSupport(boolean ignoreMalformed) @Override protected SyntheticSourceSupport syntheticSourceSupport(boolean ignoreMalformed, boolean columnReader) { - assumeFalse("synthetic _source for geo_point doesn't support ignore_malformed", ignoreMalformed); return new SyntheticSourceSupport() { private final boolean ignoreZValue = usually(); private final GeoPoint nullValue = usually() ? 
null : randomGeoPoint(); @@ -611,41 +613,64 @@ protected SyntheticSourceSupport syntheticSourceSupport(boolean ignoreMalformed, @Override public SyntheticSourceExample example(int maxVals) { if (randomBoolean()) { - Tuple v = generateValue(); + Value v = generateValue(); + if (v.malformedOutput != null) { + return new SyntheticSourceExample(v.input, v.malformedOutput, null, this::mapping); + } + if (columnReader) { - return new SyntheticSourceExample(v.v1(), decode(encode(v.v2())), encode(v.v2()), this::mapping); + return new SyntheticSourceExample(v.input, decode(encode(v.output)), encode(v.output), this::mapping); } - return new SyntheticSourceExample(v.v1(), v.v2(), v.v2().toWKT(), this::mapping); + return new SyntheticSourceExample(v.input, v.output, v.output.toWKT(), this::mapping); + } - List> values = randomList(1, maxVals, this::generateValue); - // For the synthetic source tests, the results are sorted in order of encoded values, but for row-stride reader - // they are sorted in order of input, so we sort both input and expected here to support both types of tests - List> sorted = values.stream() - .sorted((a, b) -> Long.compare(encode(a.v2()), encode(b.v2()))) + List values = randomList(1, maxVals, this::generateValue); + List in = values.stream().map(Value::input).toList(); + + List outputFromDocValues = values.stream() + .filter(v -> v.malformedOutput == null) + .sorted((a, b) -> Long.compare(encode(a.output), encode(b.output))) + .map(Value::output) .toList(); - List in = sorted.stream().map(Tuple::v1).toList(); - List outList = sorted.stream().map(Tuple::v2).toList(); + + // Malformed values always come last in synthetic source + Stream malformedValues = values.stream().filter(v -> v.malformedOutput != null).map(Value::malformedOutput); + + List outList = Stream.concat(outputFromDocValues.stream(), malformedValues).toList(); Object out = outList.size() == 1 ? outList.get(0) : outList; if (columnReader) { // When reading doc-values, the block is a list of encoded longs - List outBlockList = outList.stream().map(this::encode).toList(); + List outBlockList = outputFromDocValues.stream().map(this::encode).toList(); Object outBlock = outBlockList.size() == 1 ? outBlockList.get(0) : outBlockList; return new SyntheticSourceExample(in, out, outBlock, this::mapping); } else { - // When reading row-stride, the block is a list of WKT encoded BytesRefs - List outBlockList = outList.stream().map(GeoPoint::toWKT).toList(); + // When reading row-stride, the block is a list of WKT encoded BytesRefs. + // Values are ordered in order of input. + List outBlockList = values.stream().filter(v -> v.malformedOutput == null).map(v -> v.output.toWKT()).toList(); Object outBlock = outBlockList.size() == 1 ? outBlockList.get(0) : outBlockList; return new SyntheticSourceExample(in, out, outBlock, this::mapping); } } - private Tuple generateValue() { + private record Value(Object input, GeoPoint output, Object malformedOutput) {} + + private Value generateValue() { if (nullValue != null && randomBoolean()) { - return Tuple.tuple(null, nullValue); + return new Value(null, nullValue, null); + } + if (ignoreMalformed && randomBoolean()) { + // Different malformed values are tested in #exampleMalformedValues(). + // Here the goal is to test inputs that contain mixed valid and malformed values. 
+ List> choices = List.of( + () -> "not a valid geohash " + randomAlphaOfLength(3), + () -> Map.of("one", 1, "two", List.of(2, 22, 222), "three", Map.of("three", 33)) + ); + Object v = randomFrom(choices).get(); + return new Value(v, null, v); } GeoPoint point = randomGeoPoint(); - return Tuple.tuple(randomGeoPointInput(point), point); + return new Value(randomGeoPointInput(point), point, null); } private GeoPoint randomGeoPoint() { @@ -694,6 +719,9 @@ private void mapping(XContentBuilder b) throws IOException { if (rarely()) { b.field("store", false); } + if (ignoreMalformed) { + b.field("ignore_malformed", true); + } } @Override @@ -706,10 +734,6 @@ public List invalidExample() throws IOException { new SyntheticSourceInvalidExample( equalTo("field [field] of type [geo_point] doesn't support synthetic source because it declares copy_to"), b -> b.field("type", "geo_point").field("copy_to", "foo") - ), - new SyntheticSourceInvalidExample( - equalTo("field [field] of type [geo_point] doesn't support synthetic source because it ignores malformed points"), - b -> b.field("type", "geo_point").field("ignore_malformed", true) ) ); } diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeParser.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeParser.java index 6e4593212716f..bbaf645aa0eb4 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeParser.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeParser.java @@ -16,7 +16,6 @@ import java.io.IOException; import java.text.ParseException; -import java.util.function.Consumer; class ShapeParser extends AbstractGeometryFieldMapper.Parser { private final GeometryParser geometryParser; @@ -26,18 +25,21 @@ class ShapeParser extends AbstractGeometryFieldMapper.Parser { } @Override - public void parse(XContentParser parser, CheckedConsumer consumer, Consumer onMalformed) - throws IOException { + public void parse( + XContentParser parser, + CheckedConsumer consumer, + AbstractGeometryFieldMapper.MalformedValueHandler malformedHandler + ) throws IOException { try { if (parser.currentToken() == XContentParser.Token.START_ARRAY) { while (parser.nextToken() != XContentParser.Token.END_ARRAY) { - parse(parser, consumer, onMalformed); + parse(parser, consumer, malformedHandler); } } else { consumer.accept(geometryParser.parse(parser)); } } catch (ParseException | ElasticsearchParseException | IllegalArgumentException e) { - onMalformed.accept(e); + malformedHandler.notify(e); } } diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/PointFieldMapperTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/PointFieldMapperTests.java index ad622109e1748..e4caa625e69df 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/PointFieldMapperTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/PointFieldMapperTests.java @@ -489,7 +489,7 @@ private Value generateValue() { return new Value(nullValue, null); } - if (ignoreMalformed) { + if (ignoreMalformed && randomBoolean()) { // #exampleMalformedValues() covers a lot of cases // nice complex object diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/spatial/140_synthetic_source.yml 
b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/spatial/140_synthetic_source.yml index ccc6cd8627b53..700142cec9987 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/spatial/140_synthetic_source.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/spatial/140_synthetic_source.yml @@ -411,6 +411,86 @@ - match: { _source.point.lon: -71.34000029414892 } - match: { _source.point.lat: 41.119999922811985 } +--- +"geo_point with ignore_malformed": + - requires: + cluster_features: ["mapper.track_ignored_source"] + reason: introduced in 8.15.0 + test_runner_features: close_to + + - do: + indices.create: + index: test + body: + mappings: + _source: + mode: synthetic + properties: + geo_point: + type: geo_point + ignore_malformed: true + + - do: + index: + index: test + id: "1" + body: + geo_point: + - string: "string" + array: [{ "a": 1 }, { "b": 2 }] + object: { "foo": "bar" } + - lat: 41.12 + lon: -71.34 + + - do: + index: + index: test + id: "2" + body: + geo_point: ["POINT (-71.34 41.12)", "potato", "POINT (-77.03653 38.897676)"] + + - do: + index: + index: test + id: "3" + body: + geo_point: ["POINT (-77.03653 1000)", "POINT (-71.34 41.12)"] + + - do: + indices.refresh: {} + + - do: + get: + index: test + id: "1" + + - close_to: { _source.geo_point.0.lon: { value: -71.34, error: 0.001 } } + - close_to: { _source.geo_point.0.lat: { value: 41.12, error: 0.001 } } + - match: { _source.geo_point.1.string: "string" } + - match: { _source.geo_point.1.array: [{ "a": 1 }, { "b": 2 }] } + - match: { _source.geo_point.1.object: { "foo": "bar" } } + + - do: + get: + index: test + id: "2" + + - close_to: { _source.geo_point.0.lon: { value: -77.03653, error: 0.0001 } } + - close_to: { _source.geo_point.0.lat: { value: 38.897676, error: 0.0001 } } + - close_to: { _source.geo_point.1.lon: { value: -71.34, error: 0.001 } } + - close_to: { _source.geo_point.1.lat: { value: 41.12, error: 0.001 } } + - match: { _source.geo_point.2: "potato" } + + - do: + get: + index: test + id: "3" + + - close_to: { _source.geo_point.0.lon: { value: -71.34, error: 0.001 } } + - close_to: { _source.geo_point.0.lat: { value: 41.12, error: 0.001 } } + - match: { _source.geo_point.1: "POINT (-77.03653 1000)" } + + --- "point": - requires: From b147d07533ae0d7954c69ac886f9585610acede2 Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Tue, 18 Jun 2024 12:13:50 -0400 Subject: [PATCH 03/44] Muting PackageTests class #109852 (#109874) * Muting PackageTests class #109852 * fixing mute --- muted-tests.yml | 3 +-- .../java/org/elasticsearch/packaging/test/PackageTests.java | 2 -- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index ca017b0ec30a4..1348bff149ed0 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -68,8 +68,7 @@ tests: issue: "https://github.com/elastic/elasticsearch/issues/109838" method: "testShutdownReadinessService" - class: "org.elasticsearch.packaging.test.PackageTests" - issue: "https://github.com/elastic/elasticsearch/issues/109841" - method: "test50Remove" + issue: "https://github.com/elastic/elasticsearch/issues/109852" # Examples: # diff --git a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/PackageTests.java b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/PackageTests.java index 651bb6161017d..5c38fa36a6640 100644 --- a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/PackageTests.java +++ b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/PackageTests.java @@ 
-210,7 +210,6 @@ public void test50Remove() throws Exception { assertThat(SYSTEMD_SERVICE, fileDoesNotExist()); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/109852") public void test60Reinstall() throws Exception { try { install(); @@ -224,7 +223,6 @@ public void test60Reinstall() throws Exception { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/109852") public void test70RestartServer() throws Exception { try { install(); From a6470fb86d0580d0ea20360125b6c3fea348892b Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Tue, 18 Jun 2024 17:45:02 +0100 Subject: [PATCH 04/44] Fix cluster level dense vector stats (#107962) The cluster level dense vector stats returns the total number of dense vector indices globally including the replicas. This commit fixes the total to only include the value count of the primary indices. This change aligns with the docs stats which also reports the number of primary documents when used in cluster stats. The indices stats API still reports granular results for replicas and primaries so the information is not lost. --- docs/reference/rest-api/common-parms.asciidoc | 4 ++++ .../rest-api-spec/test/cluster.stats/10_basic.yml | 8 ++------ .../action/admin/cluster/stats/ClusterStatsIndices.java | 2 +- .../elasticsearch/rest/action/cat/RestShardsAction.java | 2 +- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/reference/rest-api/common-parms.asciidoc b/docs/reference/rest-api/common-parms.asciidoc index d745cd1670968..e537fc959965a 100644 --- a/docs/reference/rest-api/common-parms.asciidoc +++ b/docs/reference/rest-api/common-parms.asciidoc @@ -507,6 +507,10 @@ Return all statistics. `completion`:: <> statistics. +`dense_vector`:: +Total number of dense vectors indexed. +<> can affect this statistic. + `docs`:: Number of documents, number of deleted docs which have not yet merged out, and total size in bytes. <> can affect this statistic. 
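Condensed, the counting rule this patch establishes is sketched below (the helper signature and class name are illustrative; the stats types are those used by ClusterStatsIndices, assuming their usual packages in the 8.x codebase):

    import org.elasticsearch.action.admin.indices.stats.CommonStats;
    import org.elasticsearch.action.admin.indices.stats.ShardStats;
    import org.elasticsearch.index.shard.DenseVectorStats;
    import org.elasticsearch.index.shard.DocsStats;
    import org.elasticsearch.index.store.StoreStats;

    class PrimaryOnlyCountsSketch {
        static void accumulate(ShardStats shardStats, DocsStats docs, DenseVectorStats denseVectors, StoreStats store) {
            CommonStats common = shardStats.getStats();
            if (shardStats.getShardRouting().primary()) {
                // count-style stats: primaries only, so replicas no longer
                // inflate the cluster-wide totals
                docs.add(common.getDocs());
                denseVectors.add(common.getDenseVectorStats());
            }
            // size-style stats such as store still sum over every shard copy
            store.add(common.getStore());
        }
    }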
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.stats/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.stats/10_basic.yml index 3d7ac3041e0c8..cf43797a451e7 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.stats/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.stats/10_basic.yml @@ -258,14 +258,12 @@ --- "Dense vector stats": - requires: - cluster_features: [ "gte_v8.10.0" ] - reason: "dense vector stats added in 8.10" + cluster_features: [ "gte_v8.15.0" ] + reason: "dense vector stats reports from primary indices in 8.15" - do: indices.create: index: test1 body: - settings: - number_of_replicas: 0 mappings: properties: vector: @@ -283,8 +281,6 @@ indices.create: index: test2 body: - settings: - number_of_replicas: 0 mappings: properties: vector: diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIndices.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIndices.java index a6a42d9d043cc..8e3b41a4876d4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIndices.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIndices.java @@ -74,6 +74,7 @@ public ClusterStatsIndices( if (shardStats.getShardRouting().primary()) { indexShardStats.primaries++; docs.add(shardCommonStats.getDocs()); + denseVectorStats.add(shardCommonStats.getDenseVectorStats()); sparseVectorStats.add(shardCommonStats.getSparseVectorStats()); } store.add(shardCommonStats.getStore()); @@ -81,7 +82,6 @@ public ClusterStatsIndices( queryCache.add(shardCommonStats.getQueryCache()); completion.add(shardCommonStats.getCompletion()); segments.add(shardCommonStats.getSegments()); - denseVectorStats.add(shardCommonStats.getDenseVectorStats()); } searchUsageStats.add(r.searchUsageStats()); diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java index d563255e6abfc..fffa272d8fd12 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java @@ -254,7 +254,7 @@ protected Table getTableWithHeader(final RestRequest request) { ); table.addCell( "dense_vector.value_count", - "alias:dvc,denseVectorCount;default:false;text-align:right;desc:total count of indexed dense vector" + "alias:dvc,denseVectorCount;default:false;text-align:right;desc:number of indexed dense vectors in shard" ); table.addCell( "sparse_vector.value_count", From 411ff56458e589b579faa93457a8154053a1b06d Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Tue, 18 Jun 2024 13:16:28 -0400 Subject: [PATCH 05/44] Always refresh indices to ensure vectors are searchable in VectorSearchIT (#109864) There are some weird failures here and there indicating that a segment might not have been flushed and refreshed for search. 
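The fix boils down to the pattern in this minimal sketch (class and method names are illustrative; the endpoint is the standard refresh API the test already uses):

    import org.elasticsearch.client.Request;
    import org.elasticsearch.client.RestClient;

    class RefreshBeforeSearchSketch {
        // Indexed documents only become visible to search after a refresh, so a
        // test that searches immediately after indexing must refresh explicitly
        // instead of racing the periodic refresh interval.
        static void makeSearchable(RestClient client, String index) throws Exception {
            client.performRequest(new Request("POST", "/" + index + "/_refresh"));
        }
    }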
Related: https://github.com/elastic/elasticsearch/issues/109628 --- .../java/org/elasticsearch/upgrades/VectorSearchIT.java | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/VectorSearchIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/VectorSearchIT.java index 7582d6ccb8c18..544cd652741c8 100644 --- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/VectorSearchIT.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/VectorSearchIT.java @@ -56,8 +56,6 @@ public void testScriptByteVectorSearch() throws Exception { """; createIndex(SCRIPT_BYTE_INDEX_NAME, Settings.EMPTY, mapping); indexVectors(SCRIPT_BYTE_INDEX_NAME); - // refresh the index - client().performRequest(new Request("POST", "/" + SCRIPT_BYTE_INDEX_NAME + "/_refresh")); } // search with a script query Request searchRequest = new Request("POST", "/" + SCRIPT_BYTE_INDEX_NAME + "/_search"); @@ -107,8 +105,6 @@ public void testScriptVectorSearch() throws Exception { """; createIndex(SCRIPT_VECTOR_INDEX_NAME, Settings.EMPTY, mapping); indexVectors(SCRIPT_VECTOR_INDEX_NAME); - // refresh the index - client().performRequest(new Request("POST", "/" + SCRIPT_VECTOR_INDEX_NAME + "/_refresh")); } // search with a script query Request searchRequest = new Request("POST", "/" + SCRIPT_VECTOR_INDEX_NAME + "/_search"); @@ -237,7 +233,6 @@ public void testByteVectorSearch() throws Exception { // create index and index 10 random floating point vectors createIndex(BYTE_INDEX_NAME, Settings.EMPTY, mapping); indexVectors(BYTE_INDEX_NAME); - // refresh the index // force merge the index client().performRequest(new Request("POST", "/" + BYTE_INDEX_NAME + "/_forcemerge?max_num_segments=1")); } @@ -448,6 +443,8 @@ private void indexVectors(String indexName) throws Exception { indexRequest.setJsonEntity(vectors[i]); assertOK(client().performRequest(indexRequest)); } + // always refresh to ensure the data is visible + refresh(indexName); } private static Map search(Request request) throws IOException { From f2f16651c4e00f3e95afec77dde6a20bdd1392ad Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 18 Jun 2024 14:27:18 -0400 Subject: [PATCH 06/44] ESQL: Move serialization for more ScalarFunctions (#109708) This moves the serialization for more `ScalarFunction` subclasses, specifically binary comparisons and arithmetic. 12 down. 45 more to go. 
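For readers following the migration, the target serialization shape reduces to the sketch below; `ExampleWriteable` is a stand-in, not one of the migrated functions:

    import java.io.IOException;

    import org.elasticsearch.common.io.stream.NamedWriteable;
    import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;

    class ExampleWriteable implements NamedWriteable {
        static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(
            NamedWriteable.class,
            "Example",
            ExampleWriteable::new
        );

        private final int value;

        ExampleWriteable(int value) {
            this.value = value;
        }

        // deserializing constructor: reads fields in the same order writeTo writes them
        ExampleWriteable(StreamInput in) throws IOException {
            this(in.readInt());
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeInt(value);
        }

        @Override
        public String getWriteableName() {
            return ENTRY.name; // must match the name registered in ENTRY
        }
    }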
--- .../function/scalar/package-info.java | 6 +- .../predicate/operator/arithmetic/Add.java | 23 +++- .../DateTimeArithmeticOperation.java | 15 +++ .../predicate/operator/arithmetic/Div.java | 23 +++- .../arithmetic/EsqlArithmeticOperation.java | 35 ++++++ .../predicate/operator/arithmetic/Mod.java | 23 +++- .../predicate/operator/arithmetic/Mul.java | 23 +++- .../predicate/operator/arithmetic/Sub.java | 23 +++- .../predicate/operator/comparison/Equals.java | 12 ++ .../comparison/EsqlBinaryComparison.java | 27 +++++ .../operator/comparison/GreaterThan.java | 12 ++ .../comparison/GreaterThanOrEqual.java | 12 ++ .../InsensitiveBinaryComparison.java | 17 +++ .../comparison/InsensitiveEquals.java | 18 +++ .../operator/comparison/LessThan.java | 11 ++ .../operator/comparison/LessThanOrEqual.java | 12 ++ .../operator/comparison/NotEquals.java | 12 ++ .../xpack/esql/io/stream/PlanNamedTypes.java | 112 ++---------------- .../esql/plan/physical/HashJoinExec.java | 6 +- .../AbstractExpressionSerializationTests.java | 7 +- .../AbstractArithmeticSerializationTests.java | 43 +++++++ .../arithmetic/AddSerializationTests.java | 18 +++ .../arithmetic/DivSerializationTests.java | 18 +++ .../arithmetic/ModSerializationTests.java | 18 +++ .../arithmetic/MulSerializationTests.java | 18 +++ .../arithmetic/SubSerializationTests.java | 18 +++ .../AbstractComparisonSerializationTests.java | 42 +++++++ .../comparison/EqualsSerializationTests.java | 18 +++ .../GreaterThanOrEqualSerializationTests.java | 18 +++ .../GreaterThanSerializationTests.java | 18 +++ .../InsensitiveEqualsSerializationTests.java | 39 ++++++ .../LessThanOrEqualSerializationTests.java | 18 +++ .../LessThanSerializationTests.java | 18 +++ .../NotEqualsSerializationTests.java | 18 +++ .../esql/io/stream/PlanNamedTypesTests.java | 22 ++-- 35 files changed, 644 insertions(+), 129 deletions(-) create mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AbstractArithmeticSerializationTests.java create mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddSerializationTests.java create mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DivSerializationTests.java create mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/ModSerializationTests.java create mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/MulSerializationTests.java create mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubSerializationTests.java create mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/AbstractComparisonSerializationTests.java create mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EqualsSerializationTests.java create mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanOrEqualSerializationTests.java create mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanSerializationTests.java create mode 100644 
x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/InsensitiveEqualsSerializationTests.java create mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanOrEqualSerializationTests.java create mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanSerializationTests.java create mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/NotEqualsSerializationTests.java diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/package-info.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/package-info.java index 7e7a024ba2c4e..2e40ee1634d1b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/package-info.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/package-info.java @@ -110,7 +110,9 @@ * Register your function for serialization. We're in the process of migrating this serialization * from an older way to the more common, {@link org.elasticsearch.common.io.stream.NamedWriteable}. *

- * All subclasses of {@link org.elasticsearch.xpack.esql.expression.function.scalar.UnaryScalarFunction} + * All subclasses of {@link org.elasticsearch.xpack.esql.expression.function.scalar.UnaryScalarFunction}, + * {@link org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.EsqlBinaryComparison}, + * and {@link org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.EsqlArithmeticOperation} * are migrated and should include a "getWriteableName", "writeTo", and a deserializing constructor. * They should also include a {@link org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry} * and it should be linked in {@link org.elasticsearch.xpack.esql.expression.function.scalar.UnaryScalarFunction}. @@ -131,7 +133,7 @@ * *

 * Now it's time to make a unit test! The infrastructure for these is under some flux at
- * the moment, but it's good to extend from {@code AbstractScalarFunctionTestCase}. All of
+ * the moment, but it's good to extend from {@code AbstractFunctionTestCase}. All of
 * these tests are parameterized and expect to spend some time finding good parameters.
 *
  • diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Add.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Add.java index b84082d410af3..fcf71900c4198 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Add.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Add.java @@ -7,6 +7,8 @@ package org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.compute.ann.Evaluator; import org.elasticsearch.compute.ann.Fixed; import org.elasticsearch.xpack.esql.core.expression.Expression; @@ -14,6 +16,7 @@ import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; +import java.io.IOException; import java.time.DateTimeException; import java.time.Duration; import java.time.Period; @@ -25,6 +28,7 @@ import static org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.EsqlArithmeticOperation.OperationSymbol.ADD; public class Add extends DateTimeArithmeticOperation implements BinaryComparisonInversible { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Add", Add::new); public Add(Source source, Expression left, Expression right) { super( @@ -35,11 +39,28 @@ public Add(Source source, Expression left, Expression right) { AddIntsEvaluator.Factory::new, AddLongsEvaluator.Factory::new, AddUnsignedLongsEvaluator.Factory::new, - (s, lhs, rhs) -> new AddDoublesEvaluator.Factory(source, lhs, rhs), + AddDoublesEvaluator.Factory::new, AddDatetimesEvaluator.Factory::new ); } + private Add(StreamInput in) throws IOException { + super( + in, + ADD, + AddIntsEvaluator.Factory::new, + AddLongsEvaluator.Factory::new, + AddUnsignedLongsEvaluator.Factory::new, + AddDoublesEvaluator.Factory::new, + AddDatetimesEvaluator.Factory::new + ); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + @Override protected NodeInfo info() { return NodeInfo.create(this, Add::new, left(), right()); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DateTimeArithmeticOperation.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DateTimeArithmeticOperation.java index 04a7b8a6067bd..45cc5b9bdc5c0 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DateTimeArithmeticOperation.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DateTimeArithmeticOperation.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; import org.elasticsearch.xpack.esql.ExceptionUtils; import org.elasticsearch.xpack.esql.core.expression.Expression; @@ -15,6 +16,7 @@ import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; +import java.io.IOException; import java.time.Duration; import java.time.Period; import 
java.time.temporal.TemporalAmount; @@ -52,6 +54,19 @@ interface DatetimeArithmeticEvaluator { this.datetimes = datetimes; } + DateTimeArithmeticOperation( + StreamInput in, + OperationSymbol op, + BinaryEvaluator ints, + BinaryEvaluator longs, + BinaryEvaluator ulongs, + BinaryEvaluator doubles, + DatetimeArithmeticEvaluator datetimes + ) throws IOException { + super(in, op, ints, longs, ulongs, doubles); + this.datetimes = datetimes; + } + @Override protected TypeResolution resolveInputType(Expression e, TypeResolutions.ParamOrdinal paramOrdinal) { return TypeResolutions.isType( diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Div.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Div.java index 375a105f19529..6d84ce3558571 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Div.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Div.java @@ -7,6 +7,8 @@ package org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.compute.ann.Evaluator; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.predicate.operator.arithmetic.BinaryComparisonInversible; @@ -15,10 +17,13 @@ import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.util.NumericUtils; +import java.io.IOException; + import static org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.EsqlArithmeticOperation.OperationSymbol.DIV; import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.longToUnsignedLong; public class Div extends EsqlArithmeticOperation implements BinaryComparisonInversible { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Div", Div::new); private DataType type; @@ -35,11 +40,27 @@ public Div(Source source, Expression left, Expression right, DataType type) { DivIntsEvaluator.Factory::new, DivLongsEvaluator.Factory::new, DivUnsignedLongsEvaluator.Factory::new, - (s, lhs, rhs) -> new DivDoublesEvaluator.Factory(source, lhs, rhs) + DivDoublesEvaluator.Factory::new ); this.type = type; } + private Div(StreamInput in) throws IOException { + super( + in, + DIV, + DivIntsEvaluator.Factory::new, + DivLongsEvaluator.Factory::new, + DivUnsignedLongsEvaluator.Factory::new, + DivDoublesEvaluator.Factory::new + ); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + @Override public DataType dataType() { if (type == null) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/EsqlArithmeticOperation.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/EsqlArithmeticOperation.java index 6d63551abd314..89931d7a6f4d1 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/EsqlArithmeticOperation.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/EsqlArithmeticOperation.java @@ -7,6 +7,8 @@ package org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic; 
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; @@ -17,9 +19,12 @@ import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper; import org.elasticsearch.xpack.esql.expression.function.scalar.math.Cast; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; import org.elasticsearch.xpack.esql.type.EsqlDataTypeRegistry; import java.io.IOException; +import java.util.List; import java.util.function.Function; import static org.elasticsearch.common.logging.LoggerMessageFormat.format; @@ -29,6 +34,9 @@ import static org.elasticsearch.xpack.esql.core.type.DataType.UNSIGNED_LONG; public abstract class EsqlArithmeticOperation extends ArithmeticOperation implements EvaluatorMapper { + public static List getNamedWriteables() { + return List.of(Add.ENTRY, Div.ENTRY, Mod.ENTRY, Mul.ENTRY, Sub.ENTRY); + } /** * The only role of this enum is to fit the super constructor that expects a BinaryOperation which is @@ -99,6 +107,33 @@ public interface BinaryEvaluator { this.doubles = doubles; } + EsqlArithmeticOperation( + StreamInput in, + OperationSymbol op, + BinaryEvaluator ints, + BinaryEvaluator longs, + BinaryEvaluator ulongs, + BinaryEvaluator doubles + ) throws IOException { + this( + Source.readFrom((PlanStreamInput) in), + ((PlanStreamInput) in).readExpression(), + ((PlanStreamInput) in).readExpression(), + op, + ints, + longs, + ulongs, + doubles + ); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + source().writeTo(out); + ((PlanStreamOutput) out).writeExpression(left()); + ((PlanStreamOutput) out).writeExpression(right()); + } + @Override public Object fold() { return EvaluatorMapper.super.fold(); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Mod.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Mod.java index bfa10eef9a1c6..151c886bfdf8d 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Mod.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Mod.java @@ -7,16 +7,21 @@ package org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.compute.ann.Evaluator; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.util.NumericUtils; +import java.io.IOException; + import static org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.EsqlArithmeticOperation.OperationSymbol.MOD; import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.longToUnsignedLong; public class Mod extends EsqlArithmeticOperation { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Mod", Mod::new); public Mod(Source source, Expression left, 
         super(
@@ -27,10 +32,26 @@ public Mod(Source source, Expression left, Expression right) {
             ModIntsEvaluator.Factory::new,
             ModLongsEvaluator.Factory::new,
             ModUnsignedLongsEvaluator.Factory::new,
-            (s, lhs, rhs) -> new ModDoublesEvaluator.Factory(source, lhs, rhs)
+            ModDoublesEvaluator.Factory::new
+        );
+    }
+
+    private Mod(StreamInput in) throws IOException {
+        super(
+            in,
+            MOD,
+            ModIntsEvaluator.Factory::new,
+            ModLongsEvaluator.Factory::new,
+            ModUnsignedLongsEvaluator.Factory::new,
+            ModDoublesEvaluator.Factory::new
         );
     }
 
+    @Override
+    public String getWriteableName() {
+        return ENTRY.name;
+    }
+
     @Override
     protected NodeInfo<Mod> info() {
         return NodeInfo.create(this, Mod::new, left(), right());
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Mul.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Mul.java
index efb0b7dbfdc44..08a01fbffcca2 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Mul.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Mul.java
@@ -7,16 +7,21 @@
 
 package org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic;
 
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.compute.ann.Evaluator;
 import org.elasticsearch.xpack.esql.core.expression.Expression;
 import org.elasticsearch.xpack.esql.core.expression.predicate.operator.arithmetic.BinaryComparisonInversible;
 import org.elasticsearch.xpack.esql.core.tree.NodeInfo;
 import org.elasticsearch.xpack.esql.core.tree.Source;
 
+import java.io.IOException;
+
 import static org.elasticsearch.xpack.esql.core.util.NumericUtils.unsignedLongMultiplyExact;
 import static org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.EsqlArithmeticOperation.OperationSymbol.MUL;
 
 public class Mul extends EsqlArithmeticOperation implements BinaryComparisonInversible {
+    public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Mul", Mul::new);
 
     public Mul(Source source, Expression left, Expression right) {
         super(
@@ -27,10 +32,26 @@ public Mul(Source source, Expression left, Expression right) {
             MulIntsEvaluator.Factory::new,
             MulLongsEvaluator.Factory::new,
             MulUnsignedLongsEvaluator.Factory::new,
-            (s, lhs, rhs) -> new MulDoublesEvaluator.Factory(source, lhs, rhs)
+            MulDoublesEvaluator.Factory::new
+        );
+    }
+
+    private Mul(StreamInput in) throws IOException {
+        super(
+            in,
+            MUL,
+            MulIntsEvaluator.Factory::new,
+            MulLongsEvaluator.Factory::new,
+            MulUnsignedLongsEvaluator.Factory::new,
+            MulDoublesEvaluator.Factory::new
         );
     }
 
+    @Override
+    public String getWriteableName() {
+        return ENTRY.name;
+    }
+
     @Override
     public ArithmeticOperationFactory binaryComparisonInverse() {
         return Div::new;
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Sub.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Sub.java
index b2ae8cff6a697..43398b7750b0d 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Sub.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Sub.java
@@ -7,6 +7,8 @@
 package org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic;
 
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.compute.ann.Evaluator;
 import org.elasticsearch.compute.ann.Fixed;
 import org.elasticsearch.xpack.esql.core.expression.Expression;
@@ -16,6 +18,7 @@
 import org.elasticsearch.xpack.esql.core.type.DataType;
 import org.elasticsearch.xpack.esql.type.EsqlDataTypes;
 
+import java.io.IOException;
 import java.time.DateTimeException;
 import java.time.Duration;
 import java.time.Period;
@@ -28,6 +31,7 @@
 import static org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.EsqlArithmeticOperation.OperationSymbol.SUB;
 
 public class Sub extends DateTimeArithmeticOperation implements BinaryComparisonInversible {
+    public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Sub", Sub::new);
 
     public Sub(Source source, Expression left, Expression right) {
         super(
@@ -38,11 +42,28 @@ public Sub(Source source, Expression left, Expression right) {
             SubIntsEvaluator.Factory::new,
             SubLongsEvaluator.Factory::new,
             SubUnsignedLongsEvaluator.Factory::new,
-            (s, lhs, rhs) -> new SubDoublesEvaluator.Factory(source, lhs, rhs),
+            SubDoublesEvaluator.Factory::new,
             SubDatetimesEvaluator.Factory::new
         );
     }
 
+    private Sub(StreamInput in) throws IOException {
+        super(
+            in,
+            SUB,
+            SubIntsEvaluator.Factory::new,
+            SubLongsEvaluator.Factory::new,
+            SubUnsignedLongsEvaluator.Factory::new,
+            SubDoublesEvaluator.Factory::new,
+            SubDatetimesEvaluator.Factory::new
+        );
+    }
+
+    @Override
+    public String getWriteableName() {
+        return ENTRY.name;
+    }
+
     @Override
     protected TypeResolution resolveType() {
         TypeResolution resolution = super.resolveType();
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/Equals.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/Equals.java
index e73cf91cd52a8..26a74e7bdb03c 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/Equals.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/Equals.java
@@ -7,6 +7,7 @@
 package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison;
 
 import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
 import org.elasticsearch.compute.ann.Evaluator;
 import org.elasticsearch.xpack.esql.core.expression.Expression;
 import org.elasticsearch.xpack.esql.core.expression.predicate.Negatable;
@@ -19,6 +20,12 @@
 import java.util.Map;
 
 public class Equals extends EsqlBinaryComparison implements Negatable<BinaryComparison> {
+    public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(
+        Expression.class,
+        "Equals",
+        EsqlBinaryComparison::readFrom
+    );
+
     private static final Map<DataType, EsqlArithmeticOperation.BinaryEvaluator> evaluatorMap = Map.ofEntries(
         Map.entry(DataType.BOOLEAN, EqualsBoolsEvaluator.Factory::new),
         Map.entry(DataType.INTEGER, EqualsIntsEvaluator.Factory::new),
@@ -44,6 +51,11 @@ public Equals(Source source, Expression left, Expression right, ZoneId zoneId) {
         super(source, left, right, BinaryComparisonOperation.EQ, zoneId, evaluatorMap);
     }
 
+    @Override
+    public String getWriteableName() {
+        return ENTRY.name;
+    }
+
     @Override
     protected NodeInfo<Equals> info() {
         return NodeInfo.create(this, Equals::new, left(), right(), zoneId());
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EsqlBinaryComparison.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EsqlBinaryComparison.java
index 41dafecbff76e..a4559e10eaf3a 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EsqlBinaryComparison.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EsqlBinaryComparison.java
@@ -7,6 +7,7 @@
 
 package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison;
 
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
@@ -21,10 +22,13 @@
 import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper;
 import org.elasticsearch.xpack.esql.expression.function.scalar.math.Cast;
 import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.EsqlArithmeticOperation;
+import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput;
+import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput;
 import org.elasticsearch.xpack.esql.type.EsqlDataTypeRegistry;
 
 import java.io.IOException;
 import java.time.ZoneId;
+import java.util.List;
 import java.util.Map;
 import java.util.function.Function;
 
@@ -32,6 +36,9 @@
 import static org.elasticsearch.xpack.esql.core.type.DataType.UNSIGNED_LONG;
 
 public abstract class EsqlBinaryComparison extends BinaryComparison implements EvaluatorMapper {
+    public static List<NamedWriteableRegistry.Entry> getNamedWriteables() {
+        return List.of(Equals.ENTRY, GreaterThan.ENTRY, GreaterThanOrEqual.ENTRY, LessThan.ENTRY, LessThanOrEqual.ENTRY, NotEquals.ENTRY);
+    }
 
     private final Map<DataType, EsqlArithmeticOperation.BinaryEvaluator> evaluatorMap;
 
@@ -118,6 +125,26 @@ protected EsqlBinaryComparison(
         this.functionType = operation;
     }
 
+    public static EsqlBinaryComparison readFrom(StreamInput in) throws IOException {
+        // TODO this uses a constructor on the operation *and* a name which is confusing. It only needs one. Everything else uses a name.
+        var source = Source.readFrom((PlanStreamInput) in);
+        EsqlBinaryComparison.BinaryComparisonOperation operation = EsqlBinaryComparison.BinaryComparisonOperation.readFromStream(in);
+        var left = ((PlanStreamInput) in).readExpression();
+        var right = ((PlanStreamInput) in).readExpression();
+        // TODO: Remove zoneId entirely
+        var zoneId = in.readOptionalZoneId();
+        return operation.buildNewInstance(source, left, right);
+    }
+
+    @Override
+    public final void writeTo(StreamOutput out) throws IOException {
+        source().writeTo(out);
+        functionType.writeTo(out);
+        ((PlanStreamOutput) out).writeExpression(left());
+        ((PlanStreamOutput) out).writeExpression(right());
+        out.writeOptionalZoneId(zoneId());
+    }
+
     public BinaryComparisonOperation getFunctionType() {
         return functionType;
     }
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThan.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThan.java
index da639b328b7c2..8ce8bf30ef617 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThan.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThan.java
@@ -7,6 +7,7 @@
 package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison;
 
 import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
 import org.elasticsearch.compute.ann.Evaluator;
 import org.elasticsearch.xpack.esql.core.expression.Expression;
 import org.elasticsearch.xpack.esql.core.expression.predicate.Negatable;
@@ -19,6 +20,12 @@
 import java.util.Map;
 
 public class GreaterThan extends EsqlBinaryComparison implements Negatable<BinaryComparison> {
+    public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(
+        Expression.class,
+        "GreaterThan",
+        EsqlBinaryComparison::readFrom
+    );
+
     private static final Map<DataType, EsqlArithmeticOperation.BinaryEvaluator> evaluatorMap = Map.ofEntries(
         Map.entry(DataType.INTEGER, GreaterThanIntsEvaluator.Factory::new),
         Map.entry(DataType.DOUBLE, GreaterThanDoublesEvaluator.Factory::new),
@@ -39,6 +46,11 @@ public GreaterThan(Source source, Expression left, Expression right, ZoneId zone
         super(source, left, right, BinaryComparisonOperation.GT, zoneId, evaluatorMap);
     }
 
+    @Override
+    public String getWriteableName() {
+        return ENTRY.name;
+    }
+
     @Override
     protected NodeInfo<GreaterThan> info() {
         return NodeInfo.create(this, GreaterThan::new, left(), right(), zoneId());
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanOrEqual.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanOrEqual.java
index 0644cd5df9038..d7bfec75dabfc 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanOrEqual.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanOrEqual.java
@@ -7,6 +7,7 @@
 package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison;
 
 import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
 import org.elasticsearch.compute.ann.Evaluator;
 import org.elasticsearch.xpack.esql.core.expression.Expression;
 import org.elasticsearch.xpack.esql.core.expression.predicate.Negatable;
@@ -19,6 +20,12 @@
 import java.util.Map;
 
 public class GreaterThanOrEqual extends EsqlBinaryComparison implements Negatable<BinaryComparison> {
+    public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(
+        Expression.class,
+        "GreaterThanOrEqual",
+        EsqlBinaryComparison::readFrom
+    );
+
     private static final Map<DataType, EsqlArithmeticOperation.BinaryEvaluator> evaluatorMap = Map.ofEntries(
         Map.entry(DataType.INTEGER, GreaterThanOrEqualIntsEvaluator.Factory::new),
         Map.entry(DataType.DOUBLE, GreaterThanOrEqualDoublesEvaluator.Factory::new),
@@ -39,6 +46,11 @@ public GreaterThanOrEqual(Source source, Expression left, Expression right, Zone
         super(source, left, right, BinaryComparisonOperation.GTE, zoneId, evaluatorMap);
     }
 
+    @Override
+    public String getWriteableName() {
+        return ENTRY.name;
+    }
+
     @Override
     protected NodeInfo<GreaterThanOrEqual> info() {
         return NodeInfo.create(this, GreaterThanOrEqual::new, left(), right(), zoneId());
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/InsensitiveBinaryComparison.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/InsensitiveBinaryComparison.java
index 9302f6e9c5a77..137723de24edd 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/InsensitiveBinaryComparison.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/InsensitiveBinaryComparison.java
@@ -6,10 +6,16 @@
  */
 package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison;
 
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.xpack.esql.core.expression.Expression;
 import org.elasticsearch.xpack.esql.core.expression.function.scalar.BinaryScalarFunction;
 import org.elasticsearch.xpack.esql.core.tree.Source;
 import org.elasticsearch.xpack.esql.core.type.DataType;
+import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput;
+import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput;
+
+import java.io.IOException;
 
 public abstract class InsensitiveBinaryComparison extends BinaryScalarFunction {
 
@@ -17,6 +23,17 @@ protected InsensitiveBinaryComparison(Source source, Expression left, Expression
         super(source, left, right);
     }
 
+    protected InsensitiveBinaryComparison(StreamInput in) throws IOException {
+        this(Source.readFrom((PlanStreamInput) in), ((PlanStreamInput) in).readExpression(), ((PlanStreamInput) in).readExpression());
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        source().writeTo(out);
+        ((PlanStreamOutput) out).writeExpression(left());
+        ((PlanStreamOutput) out).writeExpression(right());
+    }
+
     @Override
     public DataType dataType() {
         return DataType.BOOLEAN;
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/InsensitiveEquals.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/InsensitiveEquals.java
index 5711495dc29eb..c731e44197f2e 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/InsensitiveEquals.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/InsensitiveEquals.java
@@ -9,6 +9,8 @@
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.automaton.Automaton;
 import org.apache.lucene.util.automaton.ByteRunAutomaton;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.lucene.BytesRefs;
 import org.elasticsearch.common.lucene.search.AutomatonQueries;
 import org.elasticsearch.compute.ann.Evaluator;
@@ -18,12 +20,28 @@
 import org.elasticsearch.xpack.esql.core.tree.NodeInfo;
 import org.elasticsearch.xpack.esql.core.tree.Source;
 
+import java.io.IOException;
+
 public class InsensitiveEquals extends InsensitiveBinaryComparison {
+    public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(
+        Expression.class,
+        "InsensitiveEquals",
+        InsensitiveEquals::new
+    );
 
     public InsensitiveEquals(Source source, Expression left, Expression right) {
         super(source, left, right);
     }
 
+    private InsensitiveEquals(StreamInput in) throws IOException {
+        super(in);
+    }
+
+    @Override
+    public String getWriteableName() {
+        return ENTRY.name;
+    }
+
     @Override
     protected NodeInfo<InsensitiveEquals> info() {
         return NodeInfo.create(this, InsensitiveEquals::new, left(), right());
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThan.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThan.java
index 8c6824a9827d0..b1562b6dc2be4 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThan.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThan.java
@@ -7,6 +7,7 @@
 package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison;
 
 import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
 import org.elasticsearch.compute.ann.Evaluator;
 import org.elasticsearch.xpack.esql.core.expression.Expression;
 import org.elasticsearch.xpack.esql.core.expression.predicate.Negatable;
@@ -19,6 +20,11 @@
 import java.util.Map;
 
 public class LessThan extends EsqlBinaryComparison implements Negatable<BinaryComparison> {
+    public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(
+        Expression.class,
+        "LessThan",
+        EsqlBinaryComparison::readFrom
+    );
 
     private static final Map<DataType, EsqlArithmeticOperation.BinaryEvaluator> evaluatorMap = Map.ofEntries(
         Map.entry(DataType.INTEGER, LessThanIntsEvaluator.Factory::new),
@@ -40,6 +46,11 @@ public LessThan(Source source, Expression left, Expression right, ZoneId zoneId)
         super(source, left, right, BinaryComparisonOperation.LT, zoneId, evaluatorMap);
     }
 
+    @Override
+    public String getWriteableName() {
+        return ENTRY.name;
+    }
+
     @Override
     protected NodeInfo<LessThan> info() {
         return NodeInfo.create(this, LessThan::new, left(), right(), zoneId());
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanOrEqual.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanOrEqual.java
index 0a18c44ea2891..c31e055c8dd1a 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanOrEqual.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanOrEqual.java
@@ -7,6 +7,7 @@
 package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison;
 
 import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
 import org.elasticsearch.compute.ann.Evaluator;
 import org.elasticsearch.xpack.esql.core.expression.Expression;
 import org.elasticsearch.xpack.esql.core.expression.predicate.Negatable;
@@ -19,6 +20,12 @@
 import java.util.Map;
 
 public class LessThanOrEqual extends EsqlBinaryComparison implements Negatable<BinaryComparison> {
+    public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(
+        Expression.class,
+        "LessThanOrEqual",
+        EsqlBinaryComparison::readFrom
+    );
+
     private static final Map<DataType, EsqlArithmeticOperation.BinaryEvaluator> evaluatorMap = Map.ofEntries(
         Map.entry(DataType.INTEGER, LessThanOrEqualIntsEvaluator.Factory::new),
         Map.entry(DataType.DOUBLE, LessThanOrEqualDoublesEvaluator.Factory::new),
@@ -39,6 +46,11 @@ public LessThanOrEqual(Source source, Expression left, Expression right, ZoneId
         super(source, left, right, BinaryComparisonOperation.LTE, zoneId, evaluatorMap);
     }
 
+    @Override
+    public String getWriteableName() {
+        return ENTRY.name;
+    }
+
     @Override
     protected NodeInfo<LessThanOrEqual> info() {
         return NodeInfo.create(this, LessThanOrEqual::new, left(), right(), zoneId());
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/NotEquals.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/NotEquals.java
index 0a60a6da818c1..179ff61d9c017 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/NotEquals.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/NotEquals.java
@@ -7,6 +7,7 @@
 package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison;
 
 import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
 import org.elasticsearch.compute.ann.Evaluator;
 import org.elasticsearch.xpack.esql.core.expression.Expression;
 import org.elasticsearch.xpack.esql.core.expression.predicate.Negatable;
@@ -19,6 +20,12 @@
 import java.util.Map;
 
 public class NotEquals extends EsqlBinaryComparison implements Negatable<BinaryComparison> {
+    public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(
+        Expression.class,
+        "NotEquals",
+        EsqlBinaryComparison::readFrom
+    );
+
     private static final Map<DataType, EsqlArithmeticOperation.BinaryEvaluator> evaluatorMap = Map.ofEntries(
         Map.entry(DataType.BOOLEAN, NotEqualsBoolsEvaluator.Factory::new),
         Map.entry(DataType.INTEGER, NotEqualsIntsEvaluator.Factory::new),
@@ -44,6 +51,11 @@ public NotEquals(Source source, Expression left, Expression right, ZoneId zoneId
         super(source, left, right, BinaryComparisonOperation.NEQ, zoneId, evaluatorMap);
     }
 
+    @Override
+    public String getWriteableName() {
+        return ENTRY.name;
+    }
+
     @Evaluator(extraName = "Ints")
     static boolean processInts(int lhs, int rhs) {
         return lhs != rhs;
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java
index 0629af2c17980..59cbfca89112f 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java
@@ -36,7 +36,6 @@
 import org.elasticsearch.xpack.esql.core.expression.predicate.logical.Or;
 import org.elasticsearch.xpack.esql.core.expression.predicate.nulls.IsNotNull;
 import org.elasticsearch.xpack.esql.core.expression.predicate.nulls.IsNull;
-import org.elasticsearch.xpack.esql.core.expression.predicate.operator.arithmetic.ArithmeticOperation;
 import org.elasticsearch.xpack.esql.core.expression.predicate.regex.RLikePattern;
 import org.elasticsearch.xpack.esql.core.expression.predicate.regex.RegexMatch;
 import org.elasticsearch.xpack.esql.core.expression.predicate.regex.WildcardPattern;
@@ -116,20 +115,10 @@
 import org.elasticsearch.xpack.esql.expression.function.scalar.string.ToLower;
 import org.elasticsearch.xpack.esql.expression.function.scalar.string.ToUpper;
 import org.elasticsearch.xpack.esql.expression.function.scalar.string.WildcardLike;
-import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Add;
-import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Div;
-import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Mod;
-import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Mul;
-import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Sub;
-import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.Equals;
+import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.EsqlArithmeticOperation;
 import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.EsqlBinaryComparison;
-import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.GreaterThan;
-import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.GreaterThanOrEqual;
 import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.In;
 import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.InsensitiveEquals;
-import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.LessThan;
-import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.LessThanOrEqual;
-import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.NotEquals;
 import org.elasticsearch.xpack.esql.plan.logical.Aggregate;
 import org.elasticsearch.xpack.esql.plan.logical.Dissect;
 import org.elasticsearch.xpack.esql.plan.logical.Dissect.Parser;
@@ -262,20 +251,6 @@ public static List<PlanNameRegistry.Entry> namedTypeEntries() {
         of(LogicalPlan.class, OrderBy.class, PlanNamedTypes::writeOrderBy, PlanNamedTypes::readOrderBy),
         of(LogicalPlan.class, Project.class, PlanNamedTypes::writeProject, PlanNamedTypes::readProject),
         of(LogicalPlan.class, TopN.class, PlanNamedTypes::writeTopN, PlanNamedTypes::readTopN),
-        // BinaryComparison
-        of(EsqlBinaryComparison.class, Equals.class, PlanNamedTypes::writeBinComparison, PlanNamedTypes::readBinComparison),
-        of(EsqlBinaryComparison.class, NotEquals.class, PlanNamedTypes::writeBinComparison, PlanNamedTypes::readBinComparison),
-        of(EsqlBinaryComparison.class, GreaterThan.class, PlanNamedTypes::writeBinComparison, PlanNamedTypes::readBinComparison),
-        of(EsqlBinaryComparison.class, GreaterThanOrEqual.class, PlanNamedTypes::writeBinComparison, PlanNamedTypes::readBinComparison),
-        of(EsqlBinaryComparison.class, LessThan.class, PlanNamedTypes::writeBinComparison, PlanNamedTypes::readBinComparison),
-        of(EsqlBinaryComparison.class, LessThanOrEqual.class, PlanNamedTypes::writeBinComparison, PlanNamedTypes::readBinComparison),
-        // InsensitiveEquals
-        of(
-            InsensitiveEquals.class,
-            InsensitiveEquals.class,
-            PlanNamedTypes::writeInsensitiveEquals,
-            PlanNamedTypes::readInsensitiveEquals
-        ),
         // InComparison
         of(ScalarFunction.class, In.class, PlanNamedTypes::writeInComparison, PlanNamedTypes::readInComparison),
         // RegexMatch
@@ -324,12 +299,6 @@ public static List<PlanNameRegistry.Entry> namedTypeEntries() {
         of(ScalarFunction.class, Replace.class, PlanNamedTypes::writeReplace, PlanNamedTypes::readReplace),
         of(ScalarFunction.class, ToLower.class, PlanNamedTypes::writeToLower, PlanNamedTypes::readToLower),
         of(ScalarFunction.class, ToUpper.class, PlanNamedTypes::writeToUpper, PlanNamedTypes::readToUpper),
-        // ArithmeticOperations
-        of(ArithmeticOperation.class, Add.class, PlanNamedTypes::writeArithmeticOperation, PlanNamedTypes::readArithmeticOperation),
-        of(ArithmeticOperation.class, Sub.class, PlanNamedTypes::writeArithmeticOperation, PlanNamedTypes::readArithmeticOperation),
-        of(ArithmeticOperation.class, Mul.class, PlanNamedTypes::writeArithmeticOperation, PlanNamedTypes::readArithmeticOperation),
-        of(ArithmeticOperation.class, Div.class, PlanNamedTypes::writeArithmeticOperation, PlanNamedTypes::readArithmeticOperation),
-        of(ArithmeticOperation.class, Mod.class, PlanNamedTypes::writeArithmeticOperation, PlanNamedTypes::readArithmeticOperation),
         // GroupingFunctions
         of(GroupingFunction.class, Bucket.class, PlanNamedTypes::writeBucket, PlanNamedTypes::readBucket),
         // AggregateFunctions
@@ -363,18 +332,18 @@ public static List<PlanNameRegistry.Entry> namedTypeEntries() {
         List<PlanNameRegistry.Entry> entries = new ArrayList<>(declared);
 
         // From NamedWriteables
-        for (NamedWriteableRegistry.Entry e : UnaryScalarFunction.getNamedWriteables()) {
-            entries.add(of(ESQL_UNARY_SCLR_CLS, e));
-        }
-        for (NamedWriteableRegistry.Entry e : NamedExpression.getNamedWriteables()) {
-            entries.add(of(Expression.class, e));
-        }
-        for (NamedWriteableRegistry.Entry e : FullTextPredicate.getNamedWriteables()) {
-            entries.add(of(Expression.class, e));
+        for (List<NamedWriteableRegistry.Entry> ee : List.of(
+            EsqlArithmeticOperation.getNamedWriteables(),
+            EsqlBinaryComparison.getNamedWriteables(),
+            FullTextPredicate.getNamedWriteables(),
+            NamedExpression.getNamedWriteables(),
+            UnaryScalarFunction.getNamedWriteables(),
+            List.of(UnsupportedAttribute.ENTRY, InsensitiveEquals.ENTRY, Literal.ENTRY, org.elasticsearch.xpack.esql.expression.Order.ENTRY)
+        )) {
+            for (NamedWriteableRegistry.Entry e : ee) {
+                entries.add(of(Expression.class, e));
+            }
         }
-        entries.add(of(Expression.class, UnsupportedAttribute.ENTRY));
-        entries.add(of(Expression.class, Literal.ENTRY));
-        entries.add(of(Expression.class, org.elasticsearch.xpack.esql.expression.Order.ENTRY));
 
         return entries;
     }
@@ -1002,40 +971,6 @@ static void writeTopN(PlanStreamOutput out, TopN topN) throws IOException {
         out.writeExpression(topN.limit());
     }
 
-    // -- BinaryComparison
-
-    public static EsqlBinaryComparison readBinComparison(PlanStreamInput in, String name) throws IOException {
-        var source = Source.readFrom(in);
-        EsqlBinaryComparison.BinaryComparisonOperation operation = EsqlBinaryComparison.BinaryComparisonOperation.readFromStream(in);
-        var left = in.readExpression();
-        var right = in.readExpression();
-        // TODO: Remove zoneId entirely
-        var zoneId = in.readOptionalZoneId();
-        return operation.buildNewInstance(source, left, right);
-    }
-
-    public static void writeBinComparison(PlanStreamOutput out, EsqlBinaryComparison binaryComparison) throws IOException {
-        binaryComparison.source().writeTo(out);
-        binaryComparison.getFunctionType().writeTo(out);
-        out.writeExpression(binaryComparison.left());
-        out.writeExpression(binaryComparison.right());
-        out.writeOptionalZoneId(binaryComparison.zoneId());
-    }
-
-    // -- InsensitiveEquals
-    static InsensitiveEquals readInsensitiveEquals(PlanStreamInput in, String name) throws IOException {
-        var source = Source.readFrom(in);
-        var left = in.readExpression();
-        var right = in.readExpression();
-        return new InsensitiveEquals(source, left, right);
-    }
-
-    static void writeInsensitiveEquals(PlanStreamOutput out, InsensitiveEquals eq) throws IOException {
-        eq.source().writeTo(out);
-        out.writeExpression(eq.left());
-        out.writeExpression(eq.right());
-    }
-
     // -- InComparison
 
     static In readInComparison(PlanStreamInput in) throws IOException {
@@ -1472,29 +1407,6 @@ static void writeCIDRMatch(PlanStreamOutput out, CIDRMatch cidrMatch) throws IOE
         out.writeCollection(children.subList(1, children.size()), writerFromPlanWriter(PlanStreamOutput::writeExpression));
     }
 
-    // -- ArithmeticOperations
-
-    static final Map<String, TriFunction<Source, Expression, Expression, ArithmeticOperation>> ARITHMETIC_CTRS = Map.ofEntries(
-        entry(name(Add.class), Add::new),
-        entry(name(Sub.class), Sub::new),
-        entry(name(Mul.class), Mul::new),
-        entry(name(Div.class), Div::new),
-        entry(name(Mod.class), Mod::new)
-    );
-
-    static ArithmeticOperation readArithmeticOperation(PlanStreamInput in, String name) throws IOException {
-        var source = Source.readFrom(in);
-        var left = in.readExpression();
-        var right = in.readExpression();
-        return ARITHMETIC_CTRS.get(name).apply(source, left, right);
-    }
-
-    static void writeArithmeticOperation(PlanStreamOutput out, ArithmeticOperation arithmeticOperation) throws IOException {
-        arithmeticOperation.source().writeTo(out);
-        out.writeExpression(arithmeticOperation.left());
-        out.writeExpression(arithmeticOperation.right());
-    }
-
     // -- Aggregations
 
     static final Map<String, BiFunction<Source, Expression, AggregateFunction>> AGG_CTRS = Map.ofEntries(
         entry(name(Avg.class), Avg::new),
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/HashJoinExec.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/HashJoinExec.java
index 7c124701fe332..dff0a6f0eade3 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/HashJoinExec.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/HashJoinExec.java
@@ -13,7 +13,7 @@
 import org.elasticsearch.xpack.esql.core.tree.NodeInfo;
 import org.elasticsearch.xpack.esql.core.tree.Source;
 import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.Equals;
-import org.elasticsearch.xpack.esql.io.stream.PlanNamedTypes;
+import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.EsqlBinaryComparison;
 import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput;
 import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput;
 
@@ -53,7 +53,7 @@ public HashJoinExec(PlanStreamInput in) throws IOException {
         super(Source.readFrom(in), in.readPhysicalPlanNode());
         this.joinData = new LocalSourceExec(in);
         this.matchFields = in.readNamedWriteableCollectionAsList(NamedExpression.class);
-        this.conditions = in.readCollectionAsList(i -> (Equals) PlanNamedTypes.readBinComparison(in, "equals"));
+        this.conditions = in.readCollectionAsList(i -> (Equals) EsqlBinaryComparison.readFrom(in));
         this.output = in.readNamedWriteableCollectionAsList(Attribute.class);
     }
 
@@ -62,7 +62,7 @@ public void writeTo(PlanStreamOutput out) throws IOException {
         out.writePhysicalPlanNode(child());
         joinData.writeTo(out);
         out.writeNamedWriteableCollection(matchFields);
-        out.writeCollection(conditions, (o, v) -> PlanNamedTypes.writeBinComparison(out, v));
+        out.writeCollection(conditions, (o, v) -> v.writeTo(o));
         out.writeNamedWriteableCollection(output);
     }
 
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AbstractExpressionSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AbstractExpressionSerializationTests.java
index d0203419f01ab..5a794c3ff7730 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AbstractExpressionSerializationTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AbstractExpressionSerializationTests.java
@@ -25,10 +25,8 @@
 
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
-import java.util.stream.Collectors;
 
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.sameInstance;
@@ -48,10 +46,7 @@ public static Expression randomChild() {
 
     @Override
     protected final T copyInstance(T instance, TransportVersion version) throws IOException {
-        EsqlConfiguration config = EsqlConfigurationSerializationTests.randomConfiguration(
-            Arrays.stream(EXAMPLE_QUERY).collect(Collectors.joining("\n")),
-            Map.of()
-        );
+        EsqlConfiguration config = EsqlConfigurationSerializationTests.randomConfiguration(String.join("\n", EXAMPLE_QUERY), Map.of());
         return copyInstance(
             instance,
             getNamedWriteableRegistry(),
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AbstractArithmeticSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AbstractArithmeticSerializationTests.java
new file mode 100644
index 0000000000000..c9a7933142605
--- /dev/null
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AbstractArithmeticSerializationTests.java
@@ -0,0 +1,43 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic;
+
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.xpack.esql.core.expression.Expression;
+import org.elasticsearch.xpack.esql.core.tree.Source;
+import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests;
+
+import java.io.IOException;
+import java.util.List;
+
+public abstract class AbstractArithmeticSerializationTests<T extends EsqlArithmeticOperation> extends AbstractExpressionSerializationTests<
+    T> {
+    protected abstract T create(Source source, Expression left, Expression right);
+
+    @Override
+    protected final T createTestInstance() {
+        return create(randomSource(), randomChild(), randomChild());
+    }
+
+    @Override
+    protected final T mutateInstance(T instance) throws IOException {
+        Expression left = instance.left();
+        Expression right = instance.right();
+        if (randomBoolean()) {
+            left = randomValueOtherThan(instance.left(), AbstractExpressionSerializationTests::randomChild);
+        } else {
+            right = randomValueOtherThan(instance.right(), AbstractExpressionSerializationTests::randomChild);
+        }
+        return create(instance.source(), left, right);
+    }
+
+    @Override
+    protected List<NamedWriteableRegistry.Entry> getNamedWriteables() {
+        return EsqlArithmeticOperation.getNamedWriteables();
+    }
+}
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddSerializationTests.java
new file mode 100644
index 0000000000000..b8924a01d0904
--- /dev/null
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddSerializationTests.java
@@ -0,0 +1,18 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic;
+
+import org.elasticsearch.xpack.esql.core.expression.Expression;
+import org.elasticsearch.xpack.esql.core.tree.Source;
+
+public class AddSerializationTests extends AbstractArithmeticSerializationTests<Add> {
+    @Override
+    protected Add create(Source source, Expression left, Expression right) {
+        return new Add(source, left, right);
+    }
+}
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DivSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DivSerializationTests.java
new file mode 100644
index 0000000000000..b7e01eb835a47
--- /dev/null
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DivSerializationTests.java
@@ -0,0 +1,18 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic;
+
+import org.elasticsearch.xpack.esql.core.expression.Expression;
+import org.elasticsearch.xpack.esql.core.tree.Source;
+
+public class DivSerializationTests extends AbstractArithmeticSerializationTests<Div> {
+    @Override
+    protected Div create(Source source, Expression left, Expression right) {
+        return new Div(source, left, right);
+    }
+}
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/ModSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/ModSerializationTests.java
new file mode 100644
index 0000000000000..a0f072635db10
--- /dev/null
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/ModSerializationTests.java
@@ -0,0 +1,18 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic;
+
+import org.elasticsearch.xpack.esql.core.expression.Expression;
+import org.elasticsearch.xpack.esql.core.tree.Source;
+
+public class ModSerializationTests extends AbstractArithmeticSerializationTests<Mod> {
+    @Override
+    protected Mod create(Source source, Expression left, Expression right) {
+        return new Mod(source, left, right);
+    }
+}
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/MulSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/MulSerializationTests.java
new file mode 100644
index 0000000000000..d6eb1e3cf3cf0
--- /dev/null
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/MulSerializationTests.java
@@ -0,0 +1,18 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic;
+
+import org.elasticsearch.xpack.esql.core.expression.Expression;
+import org.elasticsearch.xpack.esql.core.tree.Source;
+
+public class MulSerializationTests extends AbstractArithmeticSerializationTests<Mul> {
+    @Override
+    protected Mul create(Source source, Expression left, Expression right) {
+        return new Mul(source, left, right);
+    }
+}
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubSerializationTests.java
new file mode 100644
index 0000000000000..274d7f669b2aa
--- /dev/null
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubSerializationTests.java
@@ -0,0 +1,18 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic;
+
+import org.elasticsearch.xpack.esql.core.expression.Expression;
+import org.elasticsearch.xpack.esql.core.tree.Source;
+
+public class SubSerializationTests extends AbstractArithmeticSerializationTests<Sub> {
+    @Override
+    protected Sub create(Source source, Expression left, Expression right) {
+        return new Sub(source, left, right);
+    }
+}
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/AbstractComparisonSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/AbstractComparisonSerializationTests.java
new file mode 100644
index 0000000000000..8f28cfddb1d3a
--- /dev/null
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/AbstractComparisonSerializationTests.java
@@ -0,0 +1,42 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison;
+
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.xpack.esql.core.expression.Expression;
+import org.elasticsearch.xpack.esql.core.tree.Source;
+import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests;
+
+import java.io.IOException;
+import java.util.List;
+
+public abstract class AbstractComparisonSerializationTests<T extends EsqlBinaryComparison> extends AbstractExpressionSerializationTests<T> {
+    protected abstract T create(Source source, Expression left, Expression right);
+
+    @Override
+    protected final T createTestInstance() {
+        return create(randomSource(), randomChild(), randomChild());
+    }
+
+    @Override
+    protected final T mutateInstance(T instance) throws IOException {
+        Expression left = instance.left();
+        Expression right = instance.right();
+        if (randomBoolean()) {
+            left = randomValueOtherThan(instance.left(), AbstractExpressionSerializationTests::randomChild);
+        } else {
+            right = randomValueOtherThan(instance.right(), AbstractExpressionSerializationTests::randomChild);
+        }
+        return create(instance.source(), left, right);
+    }
+
+    @Override
+    protected List<NamedWriteableRegistry.Entry> getNamedWriteables() {
+        return EsqlBinaryComparison.getNamedWriteables();
+    }
+}
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EqualsSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EqualsSerializationTests.java
new file mode 100644
index 0000000000000..cfeb8ce88ec87
--- /dev/null
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EqualsSerializationTests.java
@@ -0,0 +1,18 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison;
+
+import org.elasticsearch.xpack.esql.core.expression.Expression;
+import org.elasticsearch.xpack.esql.core.tree.Source;
+
+public class EqualsSerializationTests extends AbstractComparisonSerializationTests<Equals> {
+    @Override
+    protected Equals create(Source source, Expression left, Expression right) {
+        return new Equals(source, left, right);
+    }
+}
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanOrEqualSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanOrEqualSerializationTests.java
new file mode 100644
index 0000000000000..b8f9d510fb33a
--- /dev/null
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanOrEqualSerializationTests.java
@@ -0,0 +1,18 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison;
+
+import org.elasticsearch.xpack.esql.core.expression.Expression;
+import org.elasticsearch.xpack.esql.core.tree.Source;
+
+public class GreaterThanOrEqualSerializationTests extends AbstractComparisonSerializationTests<GreaterThanOrEqual> {
+    @Override
+    protected GreaterThanOrEqual create(Source source, Expression left, Expression right) {
+        return new GreaterThanOrEqual(source, left, right);
+    }
+}
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanSerializationTests.java
new file mode 100644
index 0000000000000..93352f7f9d3e0
--- /dev/null
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanSerializationTests.java
@@ -0,0 +1,18 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison;
+
+import org.elasticsearch.xpack.esql.core.expression.Expression;
+import org.elasticsearch.xpack.esql.core.tree.Source;
+
+public class GreaterThanSerializationTests extends AbstractComparisonSerializationTests<GreaterThan> {
+    @Override
+    protected GreaterThan create(Source source, Expression left, Expression right) {
+        return new GreaterThan(source, left, right);
+    }
+}
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/InsensitiveEqualsSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/InsensitiveEqualsSerializationTests.java
new file mode 100644
index 0000000000000..d9daa27936267
--- /dev/null
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/InsensitiveEqualsSerializationTests.java
@@ -0,0 +1,39 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison;
+
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.xpack.esql.core.expression.Expression;
+import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests;
+
+import java.io.IOException;
+import java.util.List;
+
+public class InsensitiveEqualsSerializationTests extends AbstractExpressionSerializationTests<InsensitiveEquals> {
+    @Override
+    protected final InsensitiveEquals createTestInstance() {
+        return new InsensitiveEquals(randomSource(), randomChild(), randomChild());
+    }
+
+    @Override
+    protected final InsensitiveEquals mutateInstance(InsensitiveEquals instance) throws IOException {
+        Expression left = instance.left();
+        Expression right = instance.right();
+        if (randomBoolean()) {
+            left = randomValueOtherThan(instance.left(), AbstractExpressionSerializationTests::randomChild);
+        } else {
+            right = randomValueOtherThan(instance.right(), AbstractExpressionSerializationTests::randomChild);
+        }
+        return new InsensitiveEquals(instance.source(), left, right);
+    }
+
+    @Override
+    protected List<NamedWriteableRegistry.Entry> getNamedWriteables() {
+        return List.of(InsensitiveEquals.ENTRY);
+    }
+}
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanOrEqualSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanOrEqualSerializationTests.java
new file mode 100644
index 0000000000000..f7580a23bf47d
--- /dev/null
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanOrEqualSerializationTests.java
@@ -0,0 +1,18 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison;
+
+import org.elasticsearch.xpack.esql.core.expression.Expression;
+import org.elasticsearch.xpack.esql.core.tree.Source;
+
+public class LessThanOrEqualSerializationTests extends AbstractComparisonSerializationTests<LessThanOrEqual> {
+    @Override
+    protected LessThanOrEqual create(Source source, Expression left, Expression right) {
+        return new LessThanOrEqual(source, left, right);
+    }
+}
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanSerializationTests.java
new file mode 100644
index 0000000000000..220f56ebb7c00
--- /dev/null
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanSerializationTests.java
@@ -0,0 +1,18 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison;
+
+import org.elasticsearch.xpack.esql.core.expression.Expression;
+import org.elasticsearch.xpack.esql.core.tree.Source;
+
+public class LessThanSerializationTests extends AbstractComparisonSerializationTests<LessThan> {
+    @Override
+    protected LessThan create(Source source, Expression left, Expression right) {
+        return new LessThan(source, left, right);
+    }
+}
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/NotEqualsSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/NotEqualsSerializationTests.java
new file mode 100644
index 0000000000000..be6c880e08736
--- /dev/null
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/NotEqualsSerializationTests.java
@@ -0,0 +1,18 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison;
+
+import org.elasticsearch.xpack.esql.core.expression.Expression;
+import org.elasticsearch.xpack.esql.core.tree.Source;
+
+public class NotEqualsSerializationTests extends AbstractComparisonSerializationTests<NotEquals> {
+    @Override
+    protected NotEquals create(Source source, Expression left, Expression right) {
+        return new NotEquals(source, left, right);
+    }
+}
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java
index 54490ba306da8..ed67275a90468
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java
@@ -19,11 +19,11 @@
 import org.elasticsearch.xpack.esql.EsqlTestUtils;
 import org.elasticsearch.xpack.esql.SerializationTestUtils;
 import org.elasticsearch.xpack.esql.core.expression.Alias;
+import org.elasticsearch.xpack.esql.core.expression.Expression;
 import org.elasticsearch.xpack.esql.core.expression.FieldAttribute;
 import org.elasticsearch.xpack.esql.core.expression.Literal;
 import org.elasticsearch.xpack.esql.core.expression.NameId;
 import org.elasticsearch.xpack.esql.core.expression.Nullability;
-import org.elasticsearch.xpack.esql.core.expression.function.Function;
 import org.elasticsearch.xpack.esql.core.expression.predicate.operator.arithmetic.ArithmeticOperation;
 import org.elasticsearch.xpack.esql.core.index.EsIndex;
 import org.elasticsearch.xpack.esql.core.plan.logical.Filter;
@@ -194,12 +194,12 @@ public void testLogicalPlanEntries() {
 
     public void testFunctionEntries() {
         var serializableFunctions = PlanNamedTypes.namedTypeEntries()
             .stream()
-            .filter(e -> Function.class.isAssignableFrom(e.categoryClass()))
+            .filter(e -> Expression.class.isAssignableFrom(e.categoryClass()))
             .map(PlanNameRegistry.Entry::name)
             .sorted()
             .toList();
 
-        for (var function : (new EsqlFunctionRegistry()).listFunctions()) {
+        for (var function : new EsqlFunctionRegistry().listFunctions()) {
             assertThat(serializableFunctions, hasItem(equalTo(PlanNamedTypes.name(function.clazz()))));
         }
     }
@@ -233,15 +233,13 @@ public void testBinComparisonSimple() throws IOException {
         var orig = new Equals(Source.EMPTY, field("foo", DataType.DOUBLE), field("bar", DataType.DOUBLE));
         BytesStreamOutput bso = new BytesStreamOutput();
         PlanStreamOutput out = new PlanStreamOutput(bso, planNameRegistry, null);
-        out.writeNamed(EsqlBinaryComparison.class, orig);
-        var deser = (Equals) planStreamInput(bso).readNamed(EsqlBinaryComparison.class);
+        out.writeNamed(Expression.class, orig);
+        var deser = (Equals) planStreamInput(bso).readNamed(Expression.class);
         EqualsHashCodeTestUtils.checkEqualsAndHashCode(orig, unused -> deser);
     }
 
     public void testBinComparison() {
-        Stream.generate(PlanNamedTypesTests::randomBinaryComparison)
-            .limit(100)
-            .forEach(obj -> assertNamedType(EsqlBinaryComparison.class, obj));
+        Stream.generate(PlanNamedTypesTests::randomBinaryComparison).limit(100).forEach(obj -> assertNamedType(Expression.class, obj));
     }
 
     public void testAggFunctionSimple() throws IOException {
@@ -261,15 +259,13 @@ public void testArithmeticOperationSimple() throws IOException {
         var orig = new Add(Source.EMPTY, field("foo", DataType.LONG), field("bar", DataType.LONG));
         BytesStreamOutput bso = new BytesStreamOutput();
         PlanStreamOutput out = new PlanStreamOutput(bso, planNameRegistry, null);
-        out.writeNamed(ArithmeticOperation.class, orig);
-        var deser = (Add) planStreamInput(bso).readNamed(ArithmeticOperation.class);
+        out.writeNamed(Expression.class, orig);
+        var deser = (Add) planStreamInput(bso).readNamed(Expression.class);
         EqualsHashCodeTestUtils.checkEqualsAndHashCode(orig, unused -> deser);
     }
 
     public void testArithmeticOperation() {
-        Stream.generate(PlanNamedTypesTests::randomArithmeticOperation)
-            .limit(100)
-            .forEach(obj -> assertNamedType(ArithmeticOperation.class, obj));
+        Stream.generate(PlanNamedTypesTests::randomArithmeticOperation).limit(100).forEach(obj -> assertNamedType(Expression.class, obj));
     }
 
     public void testSubStringSimple() throws IOException {

From e6301e5c98dbdc14b761681e6953743a883c6685 Mon Sep 17 00:00:00 2001
From: Artem Prigoda
Date: Tue, 18 Jun 2024 20:47:21 +0200
Subject: [PATCH 07/44] AwaitsFix: https://github.com/elastic/elasticsearch/issues/104807

---
 muted-tests.yml | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/muted-tests.yml b/muted-tests.yml
index 1348bff149ed0..b533a29bfea1e 100644
--- a/muted-tests.yml
+++ b/muted-tests.yml
@@ -69,6 +69,9 @@ tests:
   method: "testShutdownReadinessService"
 - class: "org.elasticsearch.packaging.test.PackageTests"
   issue: "https://github.com/elastic/elasticsearch/issues/109852"
+- class: "org.elasticsearch.cluster.PrevalidateShardPathIT"
+  issue: "https://github.com/elastic/elasticsearch/issues/104807"
+  method: "testCheckShards"
 # Examples:
 #

From 23fc1513d48ec1c139d44c41f20e69d0e8bc6971 Mon Sep 17 00:00:00 2001
From: Max Hniebergall <137079448+maxhniebergall@users.noreply.github.com>
Date: Tue, 18 Jun 2024 14:50:50 -0400
Subject: [PATCH 08/44] [ML] StartTrainedModelDeployment Request query params override body params (#109487)

* Perform checks on request values regardless of the existence of a body

* Update docs/changelog/109487.yaml

* Update yaml summary

* Fix yaml summary

* Check for body and query parameters, verify they are same if set

* update yaml

* add same param check for default cache size

* Fix (remove) defaults

* handle null request wait for state

* Refactor handling of default values

* Fix tests; setDefaults in toXContent to avoid serialization errors

* Fix test by correctly checking for null allocations or threads

* update yaml

* Change rest action creation to assume that body parameters which are equal to the defaults were not specified in the body. This means that errors will not be thrown if the body and query parameters don't match as long as the body parameter is the same as the default.
  action creation to assume that body parameters which are equal to the
  defaults were not specified in the body. This means that errors will not
  be thrown if the body and query parameters don't match as long as the
  body parameter is the same as the default.
* fix bad merge
* Fixes from review: remove extraneous enums, generalize param validation
* added javadoc and renamed validateParameters function

---------

Co-authored-by: Elastic Machine
---
 docs/changelog/109487.yaml                    |   5 +
 .../StartTrainedModelDeploymentAction.java    |  15 +-
 ...RestStartTrainedModelDeploymentAction.java | 146 +++++++++++++-----
 ...tartTrainedModelDeploymentActionTests.java |  31 ++++
 4 files changed, 152 insertions(+), 45 deletions(-)
 create mode 100644 docs/changelog/109487.yaml

diff --git a/docs/changelog/109487.yaml b/docs/changelog/109487.yaml
new file mode 100644
index 0000000000000..c69c77203f12d
--- /dev/null
+++ b/docs/changelog/109487.yaml
@@ -0,0 +1,5 @@
+pr: 109487
+summary: Start Trained Model Deployment API request query params now override body params
+area: Machine Learning
+type: bug
+issues: []
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java
index b3cf9f16c3c82..ca9b86a90f875 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java
@@ -67,6 +67,11 @@ public class StartTrainedModelDeploymentAction extends ActionType r.paramAsInt(s, request.getNumberOfAllocations()),
-            request::setNumberOfAllocations
-        );
-        RestCompatibilityChecker.checkAndSetDeprecatedParam(
-            THREADS_PER_ALLOCATION.getDeprecatedNames()[0],
-            THREADS_PER_ALLOCATION.getPreferredName(),
-            RestApiVersion.V_8,
-            restRequest,
-            (r, s) -> r.paramAsInt(s, request.getThreadsPerAllocation()),
-            request::setThreadsPerAllocation
-        );
-        request.setQueueCapacity(restRequest.paramAsInt(QUEUE_CAPACITY.getPreferredName(), request.getQueueCapacity()));
-        if (restRequest.hasParam(CACHE_SIZE.getPreferredName())) {
-            request.setCacheSize(
-                ByteSizeValue.parseBytesSizeValue(restRequest.param(CACHE_SIZE.getPreferredName()), CACHE_SIZE.getPreferredName())
-            );
-        } else if (defaultCacheSize != null) {
-            request.setCacheSize(defaultCacheSize);
-        }
-        request.setQueueCapacity(restRequest.paramAsInt(QUEUE_CAPACITY.getPreferredName(), request.getQueueCapacity()));
-        request.setPriority(
-            restRequest.param(
-                StartTrainedModelDeploymentAction.TaskParams.PRIORITY.getPreferredName(),
-                request.getPriority().toString()
+        }
+
+        if (restRequest.hasParam(TIMEOUT.getPreferredName())) {
+            TimeValue openTimeout = validateParameters(
+                request.getTimeout(),
+                restRequest.paramAsTime(TIMEOUT.getPreferredName(), StartTrainedModelDeploymentAction.DEFAULT_TIMEOUT),
+                StartTrainedModelDeploymentAction.DEFAULT_TIMEOUT
+            ); // hasParam, so never default
+            request.setTimeout(openTimeout);
+        }
+
+        request.setWaitForState(
+            validateParameters(
+                request.getWaitForState(),
+                AllocationStatus.State.fromString(
+                    restRequest.param(WAIT_FOR.getPreferredName(), StartTrainedModelDeploymentAction.DEFAULT_WAITFOR_STATE.toString())
+                ),
+                StartTrainedModelDeploymentAction.DEFAULT_WAITFOR_STATE
+            )
+        );
+
+        RestCompatibilityChecker.checkAndSetDeprecatedParam(
+            NUMBER_OF_ALLOCATIONS.getDeprecatedNames()[0],
+            NUMBER_OF_ALLOCATIONS.getPreferredName(),
+            RestApiVersion.V_8,
+            restRequest,
+            (r, s) -> validateParameters(
+                request.getNumberOfAllocations(),
+                r.paramAsInt(s, StartTrainedModelDeploymentAction.DEFAULT_NUM_ALLOCATIONS),
+                StartTrainedModelDeploymentAction.DEFAULT_NUM_ALLOCATIONS
+            ),
+            request::setNumberOfAllocations
+        );
+        RestCompatibilityChecker.checkAndSetDeprecatedParam(
+            THREADS_PER_ALLOCATION.getDeprecatedNames()[0],
+            THREADS_PER_ALLOCATION.getPreferredName(),
+            RestApiVersion.V_8,
+            restRequest,
+            (r, s) -> validateParameters(
+                request.getThreadsPerAllocation(),
+                r.paramAsInt(s, StartTrainedModelDeploymentAction.DEFAULT_NUM_THREADS),
+                StartTrainedModelDeploymentAction.DEFAULT_NUM_THREADS
+            ),
+            request::setThreadsPerAllocation
+        );
+        request.setQueueCapacity(
+            validateParameters(
+                request.getQueueCapacity(),
+                restRequest.paramAsInt(QUEUE_CAPACITY.getPreferredName(), StartTrainedModelDeploymentAction.DEFAULT_QUEUE_CAPACITY),
+                StartTrainedModelDeploymentAction.DEFAULT_QUEUE_CAPACITY
+            )
+        );
+
+        if (restRequest.hasParam(CACHE_SIZE.getPreferredName())) {
+            request.setCacheSize(
+                validateParameters(
+                    request.getCacheSize(),
+                    ByteSizeValue.parseBytesSizeValue(restRequest.param(CACHE_SIZE.getPreferredName()), CACHE_SIZE.getPreferredName()),
+                    null
                 )
             );
+        } else if (defaultCacheSize != null && request.getCacheSize() == null) {
+            request.setCacheSize(defaultCacheSize);
         }
+        request.setPriority(
+            validateParameters(
+                request.getPriority().toString(),
+                restRequest.param(StartTrainedModelDeploymentAction.TaskParams.PRIORITY.getPreferredName()),
+                StartTrainedModelDeploymentAction.DEFAULT_PRIORITY.toString()
+            )
+        );
+
         return channel -> client.execute(StartTrainedModelDeploymentAction.INSTANCE, request, new RestToXContentListener<>(channel));
     }
+
+    /**
+     * This function validates that the body and query parameters don't conflict, and returns the value that should be used.
+     * When using this function, the body parameter should already have been set to the default value in
+     * {@link StartTrainedModelDeploymentAction}, or, set to a different value from the rest request.
+     *
+     * @param paramDefault (from {@link StartTrainedModelDeploymentAction})
+     * @return the parameter to use
+     * @throws ElasticsearchStatusException if the parameters don't match
+     */
+    private static <T> T validateParameters(@Nullable T bodyParam, @Nullable T queryParam, @Nullable T paramDefault)
+        throws ElasticsearchStatusException {
+        if (Objects.equals(bodyParam, paramDefault) && queryParam != null) {
+            // the body param is the same as the default for this value. We cannot tell if this was set intentionally, or if it was
+            // just the default, thus we will assume it was the default
+            return queryParam;
+        }
+
+        if (Objects.equals(bodyParam, queryParam)) {
+            return bodyParam;
+        } else if (bodyParam == null) {
+            return queryParam;
+        } else if (queryParam == null) {
+            return bodyParam;
+        } else {
+            throw new ElasticsearchStatusException(
+                "The parameter " + bodyParam + " in the body is different from the parameter " + queryParam + " in the query",
+                RestStatus.BAD_REQUEST
+            );
+        }
+    }
 }
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/rest/inference/RestStartTrainedModelDeploymentActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/rest/inference/RestStartTrainedModelDeploymentActionTests.java
index 26f877a110dc4..7c1f499640e64 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/rest/inference/RestStartTrainedModelDeploymentActionTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/rest/inference/RestStartTrainedModelDeploymentActionTests.java
@@ -8,14 +8,21 @@
 package org.elasticsearch.xpack.ml.rest.inference;

 import org.apache.lucene.util.SetOnce;
+import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.test.rest.FakeRestRequest;
 import org.elasticsearch.test.rest.RestActionTestCase;
+import org.elasticsearch.xcontent.XContentFactory;
+import org.elasticsearch.xcontent.XContentType;
 import org.elasticsearch.xpack.core.ml.action.CreateTrainedModelAssignmentAction;
 import org.elasticsearch.xpack.core.ml.action.StartTrainedModelDeploymentAction;
 import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentTests;

+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
 import static org.hamcrest.CoreMatchers.is;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.instanceOf;
@@ -64,6 +71,30 @@ public void testCacheEnabled() {
         assertThat(executeCalled.get(), equalTo(true));
     }

+    public void testExceptionFromDifferentParamsInQueryAndBody() throws IOException {
+        SetOnce<Boolean> executeCalled = new SetOnce<>();
+        controller().registerHandler(new RestStartTrainedModelDeploymentAction(false));
+        verifyingClient.setExecuteVerifier(((actionType, actionRequest) -> {
+            assertThat(actionRequest, instanceOf(StartTrainedModelDeploymentAction.Request.class));
+            executeCalled.set(true);
+            return createResponse();
+        }));
+
+        Map<String, String> paramsMap = new HashMap<>(1);
+        paramsMap.put("cache_size", "1mb");
+        RestRequest inferenceRequest = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.POST)
+            .withPath("_ml/trained_models/test_id/deployment/_start")
+            .withParams(paramsMap)
+            .withContent(
+                BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("cache_size", "2mb").endObject()),
+                XContentType.JSON
+            )
+            .build();
+        dispatchRequest(inferenceRequest);
+        assertThat(executeCalled.get(), equalTo(null)); // the duplicate parameter should cause an exception, but the exception isn't
+        // visible here, so we just check that the request failed
+    }
+
     private static CreateTrainedModelAssignmentAction.Response createResponse() {
         return new CreateTrainedModelAssignmentAction.Response(TrainedModelAssignmentTests.randomInstance());
     }
From a1b4d16a968c71c6b585ff80938f937dc3647e5c Mon Sep 17 00:00:00 2001
From: David Turner
Date: Tue, 18 Jun 2024 19:49:48 +0100
Subject: [PATCH 09/44]
AwaitsFix for #108613 --- .../java/org/elasticsearch/readiness/ReadinessClusterIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/readiness/ReadinessClusterIT.java b/server/src/internalClusterTest/java/org/elasticsearch/readiness/ReadinessClusterIT.java index b7a1dc12406d2..5b44a949ab784 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/readiness/ReadinessClusterIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/readiness/ReadinessClusterIT.java @@ -122,6 +122,7 @@ private void expectMasterNotFound() { ); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/108613") public void testReadinessDuringRestarts() throws Exception { internalCluster().setBootstrapMasterNodeIndex(0); writeFileSettings(testJSON); From 653d800b67fcc833de65bc8f29a9461d3383a00f Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 18 Jun 2024 20:15:04 +0100 Subject: [PATCH 10/44] AwaitsFix for #109884 --- .../elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java | 1 + 1 file changed, 1 insertion(+) diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java index ed67275a90468..4744d3939a3bb 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java @@ -360,6 +360,7 @@ public void testEsqlProject() throws IOException { EqualsHashCodeTestUtils.checkEqualsAndHashCode(orig, unused -> deser); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/109884") public void testMvExpand() throws IOException { var esRelation = new EsRelation( Source.EMPTY, From 44f59684487e2c787cd8b242e470ca58795e151b Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Tue, 18 Jun 2024 13:33:44 -0700 Subject: [PATCH 11/44] Adjust packaging test exit code assertion (#109879) --- muted-tests.yml | 2 -- .../java/org/elasticsearch/packaging/test/PackageTests.java | 2 +- .../elasticsearch/packaging/test/RpmPreservationTests.java | 4 ++-- 3 files changed, 3 insertions(+), 5 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index b533a29bfea1e..c3fc9c0fe4c78 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -67,8 +67,6 @@ tests: - class: "org.elasticsearch.xpack.shutdown.NodeShutdownReadinessIT" issue: "https://github.com/elastic/elasticsearch/issues/109838" method: "testShutdownReadinessService" -- class: "org.elasticsearch.packaging.test.PackageTests" - issue: "https://github.com/elastic/elasticsearch/issues/109852" - class: "org.elasticsearch.cluster.PrevalidateShardPathIT" issue: "https://github.com/elastic/elasticsearch/issues/104807" method: "testCheckShards" diff --git a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/PackageTests.java b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/PackageTests.java index 5c38fa36a6640..bff48ba74e8fc 100644 --- a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/PackageTests.java +++ b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/PackageTests.java @@ -194,7 +194,7 @@ public void test50Remove() throws Exception { } assertThat(sh.runIgnoreExitCode("systemctl status elasticsearch.service").exitCode(), is(statusExitCode)); - assertThat(sh.runIgnoreExitCode("systemctl is-enabled elasticsearch.service").exitCode(), is(1)); 
+ assertThat(sh.runIgnoreExitCode("systemctl is-enabled elasticsearch.service").exitCode(), not(0)); } diff --git a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/RpmPreservationTests.java b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/RpmPreservationTests.java index 19a9d9b74048e..18537c744f8b2 100644 --- a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/RpmPreservationTests.java +++ b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/RpmPreservationTests.java @@ -28,7 +28,7 @@ import static org.elasticsearch.packaging.util.Packages.verifyPackageInstallation; import static org.elasticsearch.packaging.util.Platforms.isSystemd; import static org.elasticsearch.packaging.util.ServerUtils.enableGeoIpDownloader; -import static org.hamcrest.core.Is.is; +import static org.hamcrest.Matchers.not; import static org.junit.Assume.assumeTrue; public class RpmPreservationTests extends PackagingTestCase { @@ -78,7 +78,7 @@ public void test30PreserveConfig() throws Exception { assertRemoved(distribution()); if (isSystemd()) { - assertThat(sh.runIgnoreExitCode("systemctl is-enabled elasticsearch.service").exitCode(), is(1)); + assertThat(sh.runIgnoreExitCode("systemctl is-enabled elasticsearch.service").exitCode(), not(0)); } assertPathsDoNotExist( From 29dfa2e7ba888a144158d4803d1f4d79cec339e9 Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 18 Jun 2024 22:57:36 +0100 Subject: [PATCH 12/44] AwaitsFix for #109894 and #109895 --- .../security/authz/store/NativePrivilegeStoreCacheTests.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreCacheTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreCacheTests.java index d11ca70744b7b..3094a10b1572d 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreCacheTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreCacheTests.java @@ -116,6 +116,7 @@ public void configureApplicationPrivileges() { assertEquals(6, putPrivilegesResponse.created().values().stream().mapToInt(List::size).sum()); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/109894") public void testGetPrivilegesUsesCache() { final Client client = client(); @@ -204,6 +205,7 @@ public void testPopulationOfCacheWhenLoadingPrivilegesForAllApplications() { assertEquals(1, new GetPrivilegesRequestBuilder(client).application("app-1").privileges("write").get().privileges().length); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/109895") public void testSuffixWildcard() { final Client client = client(); From 656a3800a8a043c5e9f67814275156c28289e5f2 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 18 Jun 2024 18:40:15 -0400 Subject: [PATCH 13/44] ESQL: Fix test bug (#109896) Duplicate keys broke the test setup 1 in 25 times. 
Closes #109884
---
 .../xpack/esql/io/stream/PlanNamedTypesTests.java | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java
index 4744d3939a3bb..57d304a4f032e 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java
@@ -110,6 +110,7 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.TreeSet;
 import java.util.stream.Stream;

 import static org.elasticsearch.test.ListMatcher.matchesList;
@@ -360,7 +361,6 @@ public void testEsqlProject() throws IOException {
         EqualsHashCodeTestUtils.checkEqualsAndHashCode(orig, unused -> deser);
     }

-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/109884")
     public void testMvExpand() throws IOException {
         var esRelation = new EsRelation(
             Source.EMPTY,
@@ -383,10 +383,14 @@ private static <T> void assertNamedType(Class<T> type, T origObj) {
     }

     static EsIndex randomEsIndex() {
+        Set<String> concreteIndices = new TreeSet<>();
+        while (concreteIndices.size() < 2) {
+            concreteIndices.add(randomAlphaOfLengthBetween(1, 25));
+        }
         return new EsIndex(
-            randomAlphaOfLength(randomIntBetween(1, 25)),
-            Map.of(randomAlphaOfLength(randomIntBetween(1, 25)), randomKeywordEsField()),
-            Set.of(randomAlphaOfLength(randomIntBetween(1, 25)), randomAlphaOfLength(randomIntBetween(1, 25)))
+            randomAlphaOfLengthBetween(1, 25),
+            Map.of(randomAlphaOfLengthBetween(1, 25), randomKeywordEsField()),
+            concreteIndices
         );
     }

From 9507e8620cdf79a4df7d2e896a9f5ad70e1e8196 Mon Sep 17 00:00:00 2001
From: Yang Wang
Date: Wed, 19 Jun 2024 09:45:52 +1000
Subject: [PATCH 14/44] AwaitsFix: https://github.com/elastic/elasticsearch/issues/109898

---
 muted-tests.yml | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/muted-tests.yml b/muted-tests.yml
index c3fc9c0fe4c78..9b44732c59e33 100644
--- a/muted-tests.yml
+++ b/muted-tests.yml
@@ -70,6 +70,9 @@ tests:
 - class: "org.elasticsearch.cluster.PrevalidateShardPathIT"
   issue: "https://github.com/elastic/elasticsearch/issues/104807"
   method: "testCheckShards"
+- class: "org.elasticsearch.packaging.test.RpmPreservationTests"
+  issue: "https://github.com/elastic/elasticsearch/issues/109898"
+  method: "test30PreserveConfig"

 # Examples:
 #
From 6ed256a308d7f84e1ed20db15ba6c8702025831e Mon Sep 17 00:00:00 2001
From: Nik Everett
Date: Tue, 18 Jun 2024 20:10:27 -0400
Subject: [PATCH 15/44] ESQL: Merge two data type utility methods (#109856)

This merges `EsqlDataTypes#fromJava` into `DataType#fromJava`. Now that
we've forked classic-QL into ESQL there isn't a reason to have both
utility methods. There isn't really a reason to have `EsqlDataTypes` at
all, but it'll take some time to merge it all into `DataType`.
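For illustration, a minimal sketch of the merged entry point (results follow the branches
visible in this diff; the numeric branches are pre-existing and assumed unchanged):

    DataType.fromJava("a string");          // KEYWORD
    DataType.fromJava('c');                 // KEYWORD
    DataType.fromJava(new BytesRef("abc")); // KEYWORD, via the branch added here
    DataType.fromJava(42);                  // INTEGER (pre-existing handling)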
--- .../xpack/esql/core/type/DataType.java | 3 +- .../function/scalar/FunctionTestUtils.java | 67 ------------------- .../xpack/esql/action/RequestXContent.java | 3 +- .../esql/type/EsqlDataTypeConverter.java | 2 +- .../xpack/esql/type/EsqlDataTypeRegistry.java | 2 +- .../xpack/esql/type/EsqlDataTypes.java | 28 -------- .../xpack/esql/analysis/VerifierTests.java | 2 +- .../expression/function/TestCaseSupplier.java | 2 +- .../scalar/conditional/CaseTests.java | 3 +- 9 files changed, 8 insertions(+), 104 deletions(-) diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java index 53b191dbd6332..7f48751535ba9 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java @@ -6,6 +6,7 @@ */ package org.elasticsearch.xpack.esql.core.type; +import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.mapper.SourceFieldMapper; @@ -199,7 +200,7 @@ public static DataType fromJava(Object value) { if (value instanceof ZonedDateTime) { return DATETIME; } - if (value instanceof String || value instanceof Character) { + if (value instanceof String || value instanceof Character || value instanceof BytesRef) { return KEYWORD; } diff --git a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/expression/function/scalar/FunctionTestUtils.java b/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/expression/function/scalar/FunctionTestUtils.java index 8f0ff30074b83..9e3ab40ec6462 100644 --- a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/expression/function/scalar/FunctionTestUtils.java +++ b/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/expression/function/scalar/FunctionTestUtils.java @@ -7,20 +7,10 @@ package org.elasticsearch.xpack.esql.core.expression.function.scalar; -import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.esql.core.expression.Literal; import org.elasticsearch.xpack.esql.core.type.DataType; -import java.time.Instant; -import java.time.ZonedDateTime; -import java.util.BitSet; -import java.util.Iterator; - import static org.elasticsearch.xpack.esql.core.tree.Source.EMPTY; -import static org.elasticsearch.xpack.esql.core.type.DataType.BOOLEAN; -import static org.elasticsearch.xpack.esql.core.type.DataType.DATETIME; -import static org.elasticsearch.xpack.esql.core.type.DataType.INTEGER; -import static org.elasticsearch.xpack.esql.core.type.DataType.KEYWORD; public final class FunctionTestUtils { @@ -31,61 +21,4 @@ public static Literal l(Object value) { public static Literal l(Object value, DataType type) { return new Literal(EMPTY, value, type); } - - public static Literal randomStringLiteral() { - return l(ESTestCase.randomRealisticUnicodeOfLength(10), KEYWORD); - } - - public static Literal randomIntLiteral() { - return l(ESTestCase.randomInt(), INTEGER); - } - - public static Literal randomBooleanLiteral() { - return l(ESTestCase.randomBoolean(), BOOLEAN); - } - - public static Literal randomDatetimeLiteral() { - return l(ZonedDateTime.ofInstant(Instant.ofEpochMilli(ESTestCase.randomLong()), ESTestCase.randomZone()), DATETIME); - } - - public static class Combinations implements Iterable 
{ - private int n; - private int k; - - public Combinations(int n, int k) { - this.n = n; - this.k = k; - } - - @Override - public Iterator iterator() { - return new Iterator<>() { - BitSet bs = new BitSet(n); - - { - bs.set(0, k); - } - - @Override - public boolean hasNext() { - return bs != null; - } - - @Override - public BitSet next() { - BitSet old = (BitSet) bs.clone(); - int b = bs.previousClearBit(n - 1); - int b1 = bs.previousSetBit(b); - if (b1 == -1) { - bs = null; - } else { - bs.clear(b1); - bs.set(b1 + 1, b1 + (n - b) + 1); - bs.clear(b1 + (n - b) + 1, n); - } - return old; - } - }; - } - } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RequestXContent.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RequestXContent.java index 2c6b5e7a6b490..4c511a4450bc8 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RequestXContent.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RequestXContent.java @@ -19,7 +19,6 @@ import org.elasticsearch.xpack.esql.parser.QueryParam; import org.elasticsearch.xpack.esql.parser.QueryParams; import org.elasticsearch.xpack.esql.plugin.QueryPragmas; -import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import java.io.IOException; import java.util.ArrayList; @@ -165,7 +164,7 @@ private static QueryParams parseParams(XContentParser p) throws IOException { ) ); } - type = EsqlDataTypes.fromJava(entry.getValue()); + type = DataType.fromJava(entry.getValue()); if (type == null) { errors.add(new XContentParseException(loc, entry + " is not supported as a parameter")); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java index cc2525799224b..23a94bde56b1e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java @@ -211,7 +211,7 @@ public static TemporalAmount parseTemporalAmount(Object val, DataType expectedTy * Throws QlIllegalArgumentException if such conversion is not possible */ public static Object convert(Object value, DataType dataType) { - DataType detectedType = EsqlDataTypes.fromJava(value); + DataType detectedType = DataType.fromJava(value); if (detectedType == dataType || value == null) { return value; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeRegistry.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeRegistry.java index dc680e5305842..ee28a7fe9941a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeRegistry.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeRegistry.java @@ -44,7 +44,7 @@ public DataType fromEs(String typeName, TimeSeriesParams.MetricType metricType) @Override public DataType fromJava(Object value) { - return EsqlDataTypes.fromJava(value); + return DataType.fromJava(value); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypes.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypes.java index de97d6925e002..2d817d65f6ba9 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypes.java +++ 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypes.java @@ -6,7 +6,6 @@ */ package org.elasticsearch.xpack.esql.type; -import org.apache.lucene.util.BytesRef; import org.elasticsearch.xpack.esql.core.type.DataType; import java.util.Collections; @@ -15,7 +14,6 @@ import static java.util.stream.Collectors.toMap; import static java.util.stream.Collectors.toUnmodifiableMap; -import static org.elasticsearch.xpack.esql.core.type.DataType.BOOLEAN; import static org.elasticsearch.xpack.esql.core.type.DataType.BYTE; import static org.elasticsearch.xpack.esql.core.type.DataType.DATE_PERIOD; import static org.elasticsearch.xpack.esql.core.type.DataType.DOUBLE; @@ -62,32 +60,6 @@ public static DataType fromName(String name) { return type != null ? type : UNSUPPORTED; } - public static DataType fromJava(Object value) { - if (value == null) { - return NULL; - } - if (value instanceof Boolean) { - return BOOLEAN; - } - if (value instanceof Integer) { - return INTEGER; - } - if (value instanceof Long) { - return LONG; - } - if (value instanceof Double) { - return DOUBLE; - } - if (value instanceof Float) { - return FLOAT; - } - if (value instanceof String || value instanceof Character || value instanceof BytesRef) { - return KEYWORD; - } - - return null; - } - public static boolean isUnsupported(DataType type) { return DataType.isUnsupported(type); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java index e5f59f1ffa8ad..8eef05bd9687b 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java @@ -564,7 +564,7 @@ private String error(String query, Analyzer analyzer, Object... 
params) { } else if (param instanceof String) { parameters.add(new QueryParam(null, param, KEYWORD)); } else if (param instanceof Number) { - parameters.add(new QueryParam(null, param, EsqlDataTypes.fromJava(param))); + parameters.add(new QueryParam(null, param, DataType.fromJava(param))); } else { throw new IllegalArgumentException("VerifierTests don't support params of type " + param.getClass()); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java index 54c4f2ae07eca..7eadad58ec09b 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java @@ -1417,7 +1417,7 @@ public TypedData(Object data, DataType type, String name) { * @param name a name for the value, used for generating test case names */ public TypedData(Object data, String name) { - this(data, EsqlDataTypes.fromJava(data), name); + this(data, DataType.fromJava(data), name); } /** diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseTests.java index f24955eb4804a..02da8ea22a6a0 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseTests.java @@ -22,7 +22,6 @@ import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; -import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import java.math.BigInteger; import java.util.List; @@ -334,7 +333,7 @@ private static Case caseExpr(Object... args) { if (arg instanceof Expression e) { return e; } - return new Literal(Source.synthetic(arg == null ? "null" : arg.toString()), arg, EsqlDataTypes.fromJava(arg)); + return new Literal(Source.synthetic(arg == null ? 
"null" : arg.toString()), arg, DataType.fromJava(arg)); }).toList(); return new Case(Source.synthetic(""), exps.get(0), exps.subList(1, exps.size())); } From c8da5812a9b33a0c2b02768d0122998f87d35918 Mon Sep 17 00:00:00 2001 From: Nick Tindall Date: Wed, 19 Jun 2024 10:39:23 +1000 Subject: [PATCH 16/44] [TEST] Add test for LeakTracker (#109777) Related to #101300 --- .../action/ActionListenerTests.java | 2 +- .../transport/LeakTrackerTests.java | 137 ++++++++++++++++++ .../org/elasticsearch/test/ESTestCase.java | 15 +- 3 files changed, 143 insertions(+), 11 deletions(-) create mode 100644 server/src/test/java/org/elasticsearch/transport/LeakTrackerTests.java diff --git a/server/src/test/java/org/elasticsearch/action/ActionListenerTests.java b/server/src/test/java/org/elasticsearch/action/ActionListenerTests.java index 1e18b71c99090..0543bce08a4f0 100644 --- a/server/src/test/java/org/elasticsearch/action/ActionListenerTests.java +++ b/server/src/test/java/org/elasticsearch/action/ActionListenerTests.java @@ -380,7 +380,7 @@ public void testAssertAtLeastOnceWillLogAssertionErrorWhenNotResolved() throws E listenerRef = null; assertBusy(() -> { System.gc(); - assertLeakDetected("LEAK: resource was not cleaned up before it was garbage-collected\\.(.*|\\s)*"); + assertLeakDetected(); }); } diff --git a/server/src/test/java/org/elasticsearch/transport/LeakTrackerTests.java b/server/src/test/java/org/elasticsearch/transport/LeakTrackerTests.java new file mode 100644 index 0000000000000..f13858351b7ee --- /dev/null +++ b/server/src/test/java/org/elasticsearch/transport/LeakTrackerTests.java @@ -0,0 +1,137 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */
+
+package org.elasticsearch.transport;
+
+import com.carrotsearch.randomizedtesting.annotations.Name;
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.core.AbstractRefCounted;
+import org.elasticsearch.core.Assertions;
+import org.elasticsearch.core.RefCounted;
+import org.elasticsearch.core.Releasables;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.ReachabilityChecker;
+import org.junit.Before;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.stream.Stream;
+
+public class LeakTrackerTests extends ESTestCase {
+
+    private static final Logger logger = LogManager.getLogger();
+
+    private final TrackedObjectLifecycle trackedObjectLifecycle;
+    private ReachabilityChecker reachabilityChecker;
+
+    @Before
+    public void createReachabilityTracker() {
+        reachabilityChecker = new ReachabilityChecker();
+    }
+
+    @Before
+    public void onlyRunWhenAssertionsAreEnabled() {
+        assumeTrue("Many of these tests don't make sense when assertions are disabled", Assertions.ENABLED);
+    }
+
+    public LeakTrackerTests(@Name("trackingMethod") TrackedObjectLifecycle trackedObjectLifecycle) {
+        this.trackedObjectLifecycle = trackedObjectLifecycle;
+    }
+
+    @ParametersFactory(shuffle = false)
+    public static Iterable<Object[]> parameters() {
+        return Stream.of(
+            new PojoTrackedObjectLifecycle(),
+            new ReleasableTrackedObjectLifecycle(),
+            new ReferenceCountedTrackedObjectLifecycle()
+        ).map(i -> new Object[] { i }).toList();
+    }
+
+    @SuppressWarnings("resource")
+    public void testWillLogErrorWhenTrackedObjectIsNotClosed() throws Exception {
+        // Let it go out of scope without closing
+        trackedObjectLifecycle.createAndTrack(reachabilityChecker);
+        reachabilityChecker.ensureUnreachable();
+        assertBusy(ESTestCase::assertLeakDetected);
+    }
+
+    public void testWillNotLogErrorWhenTrackedObjectIsClosed() throws IOException {
+        // Close before letting it go out of scope
+        trackedObjectLifecycle.createAndTrack(reachabilityChecker).close();
+        reachabilityChecker.ensureUnreachable();
+    }
+
+    /**
+     * Encapsulates the lifecycle for a particular type of tracked object
+     */
+    public interface TrackedObjectLifecycle {
+
+        /**
+         * Create the tracked object, implementations must
+         * - track it with the {@link LeakTracker}
+         * - register it with the passed reachability checker
+         * @param reachabilityChecker The reachability checker
+         * @return A {@link Closeable} that retains a reference to the tracked object, and when closed will do the appropriate cleanup
+         */
+        Closeable createAndTrack(ReachabilityChecker reachabilityChecker);
+    }
+
+    private static class PojoTrackedObjectLifecycle implements TrackedObjectLifecycle {
+
+        @Override
+        public Closeable createAndTrack(ReachabilityChecker reachabilityChecker) {
+            final Object object = reachabilityChecker.register(new Object());
+            final LeakTracker.Leak leak = LeakTracker.INSTANCE.track(object);
+            return () -> {
+                logger.info("This log line retains a reference to {}", object);
+                leak.close();
+            };
+        }
+
+        @Override
+        public String toString() {
+            return "LeakTracker.INSTANCE.track(Object)";
+        }
+    }
+
+    private static class ReferenceCountedTrackedObjectLifecycle implements TrackedObjectLifecycle {
+
+        @Override
+        public Closeable createAndTrack(ReachabilityChecker reachabilityChecker) {
+            RefCounted refCounted = LeakTracker.wrap(reachabilityChecker.register((RefCounted) AbstractRefCounted.of(() -> {})));
refCounted.incRef(); + refCounted.tryIncRef(); + return () -> { + refCounted.decRef(); // tryIncRef + refCounted.decRef(); // incRef + refCounted.decRef(); // implicit + }; + } + + @Override + public String toString() { + return "LeakTracker.wrap(RefCounted)"; + } + } + + private static class ReleasableTrackedObjectLifecycle implements TrackedObjectLifecycle { + + @Override + public Closeable createAndTrack(ReachabilityChecker reachabilityChecker) { + return LeakTracker.wrap(reachabilityChecker.register(Releasables.assertOnce(() -> {}))); + } + + @Override + public String toString() { + return "LeakTracker.wrap(Releasable)"; + } + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index 8ae5cdd8b9217..46d5e9441b83d 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -186,7 +186,6 @@ import java.util.function.IntFunction; import java.util.function.Predicate; import java.util.function.Supplier; -import java.util.regex.Pattern; import java.util.stream.Collectors; import java.util.stream.DoubleStream; import java.util.stream.IntStream; @@ -790,17 +789,13 @@ protected static void checkStaticState() throws Exception { } /** - * Assert that a leak was detected, also remove the leak from the list of detected leaks - * so the test won't fail for that specific leak. - * - * @param expectedPattern A pattern that matches the detected leak's exception + * Assert that at least one leak was detected, also clear the list of detected leaks + * so the test won't fail for leaks detected up until this point. */ - protected static void assertLeakDetected(String expectedPattern) { + protected static void assertLeakDetected() { synchronized (loggedLeaks) { - assertTrue( - "No leak detected matching the pattern: " + expectedPattern, - loggedLeaks.removeIf(leakText -> Pattern.matches(expectedPattern, leakText)) - ); + assertFalse("No leaks have been detected", loggedLeaks.isEmpty()); + loggedLeaks.clear(); } } From 5aa9f442a3efba5c73d96916341b7e25fea7a212 Mon Sep 17 00:00:00 2001 From: David Turner Date: Wed, 19 Jun 2024 05:54:44 +0100 Subject: [PATCH 17/44] Ensure tasks preserve versions in `MasterService` (#109850) `ClusterState#version`, `Metadata#version` and `RoutingTable#version` are all managed solely by the `MasterService`, in the sense that it's a definite bug for the cluster state update task executor to meddle with them. Today if we encounter such a bug then we try and publish the resulting state anyway, which hopefully fails (triggering a master election) but it may in theory succeed (potentially reverting older cluster state updates). Neither is a particularly good outcome. With this commit we add a check for consistency of these version numbers during the cluster state computation and fail the state update without a master failover if a discrepancy is found. It also fixes a super-subtle bug in `TransportMigrateToDataTiersAction` that can muck up these version numbers. 
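For illustration only, a sketch of the kind of executor bug the new check now rejects
(hypothetical executor, not code from this change):

    // Version numbers are the MasterService's job; an executor that bumps them
    // itself now fails the batch instead of publishing a suspect state:
    public ClusterState execute(ClusterState currentState) {
        return ClusterState.builder(currentState).incrementVersion().build(); // now rejected
    }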
--- docs/changelog/109850.yaml | 5 ++ .../cluster/service/MasterService.java | 32 +++++++- .../cluster/service/MasterServiceTests.java | 74 ++++++++++++++++++- .../gateway/GatewayServiceTests.java | 15 +++- .../TransportMigrateToDataTiersAction.java | 4 +- 5 files changed, 121 insertions(+), 9 deletions(-) create mode 100644 docs/changelog/109850.yaml diff --git a/docs/changelog/109850.yaml b/docs/changelog/109850.yaml new file mode 100644 index 0000000000000..0f11318765aea --- /dev/null +++ b/docs/changelog/109850.yaml @@ -0,0 +1,5 @@ +pr: 109850 +summary: Ensure tasks preserve versions in `MasterService` +area: Cluster Coordination +type: bug +issues: [] diff --git a/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java b/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java index 7f9720b64cca6..296acc30a83f5 100644 --- a/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java +++ b/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java @@ -524,6 +524,24 @@ public Builder incrementVersion(ClusterState clusterState) { return ClusterState.builder(clusterState).incrementVersion(); } + private static boolean versionNumbersPreserved(ClusterState oldState, ClusterState newState) { + if (oldState.nodes().getMasterNodeId() == null && newState.nodes().getMasterNodeId() != null) { + return true; // NodeJoinExecutor is special, we trust it to do the right thing with versions + } + + if (oldState.version() != newState.version()) { + return false; + } + if (oldState.metadata().version() != newState.metadata().version()) { + return false; + } + if (oldState.routingTable().version() != newState.routingTable().version()) { + // GatewayService is special and for odd legacy reasons gets to do this: + return oldState.clusterRecovered() == false && newState.clusterRecovered() && newState.routingTable().version() == 0; + } + return true; + } + /** * Submits an unbatched cluster state update task. This method exists for legacy reasons but is deprecated and forbidden in new * production code because unbatched tasks are a source of performance and stability bugs. You should instead implement your update @@ -1035,6 +1053,8 @@ private static boolean assertAllTasksComple return true; } + static final String TEST_ONLY_EXECUTOR_MAY_CHANGE_VERSION_NUMBER_TRANSIENT_NAME = "test_only_executor_may_change_version_number"; + private static ClusterState innerExecuteTasks( ClusterState previousClusterState, List> executionResults, @@ -1047,13 +1067,23 @@ private static ClusterState innerExecuteTas // to avoid leaking headers in production that were missed by tests try { - return executor.execute( + final var updatedState = executor.execute( new ClusterStateTaskExecutor.BatchExecutionContext<>( previousClusterState, executionResults, threadContext::newStoredContext ) ); + if (versionNumbersPreserved(previousClusterState, updatedState) == false) { + // Shenanigans! Executors mustn't meddle with version numbers. Perhaps the executor based its update on the wrong + // initial state, potentially losing an intervening cluster state update. That'd be very bad! 
+ final var exception = new IllegalStateException( + "cluster state update executor did not preserve version numbers: [" + summary.toString() + "]" + ); + assert threadContext.getTransient(TEST_ONLY_EXECUTOR_MAY_CHANGE_VERSION_NUMBER_TRANSIENT_NAME) != null : exception; + throw exception; + } + return updatedState; } catch (Exception e) { logger.trace( () -> format( diff --git a/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java index 43f3943c9c041..26faa295cf727 100644 --- a/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java @@ -33,6 +33,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.common.Priority; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.component.Lifecycle; @@ -77,6 +78,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.UnaryOperator; import java.util.stream.Collectors; import static java.util.Collections.emptySet; @@ -93,6 +95,7 @@ import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.hamcrest.Matchers.startsWith; public class MasterServiceTests extends ESTestCase { @@ -498,7 +501,7 @@ public void onFailure(Exception e) {} @Override public ClusterState execute(ClusterState currentState) { relativeTimeInMillis += TimeValue.timeValueSeconds(3).millis(); - return ClusterState.builder(currentState).incrementVersion().build(); + return ClusterState.builder(currentState).build(); } @Override @@ -1243,7 +1246,7 @@ public void onFailure(Exception e) { public ClusterState execute(ClusterState currentState) { relativeTimeInMillis += MasterService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(Settings.EMPTY).millis() + randomLongBetween(1, 1000000); - return ClusterState.builder(currentState).incrementVersion().build(); + return ClusterState.builder(currentState).build(); } @Override @@ -1277,7 +1280,7 @@ public void onFailure(Exception e) { masterService.submitUnbatchedStateUpdateTask("test5", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { - return ClusterState.builder(currentState).incrementVersion().build(); + return ClusterState.builder(currentState).build(); } @Override @@ -1293,7 +1296,7 @@ public void onFailure(Exception e) { masterService.submitUnbatchedStateUpdateTask("test6", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { - return ClusterState.builder(currentState).incrementVersion().build(); + return ClusterState.builder(currentState).build(); } @Override @@ -2592,6 +2595,69 @@ public void onFailure(Exception e) { } } + public void testVersionNumberProtection() { + runVersionNumberProtectionTest( + currentState -> ClusterState.builder(currentState) + .version(randomFrom(currentState.version() - 1, currentState.version() + 1)) + .build() + ); + + runVersionNumberProtectionTest( + currentState -> currentState.copyAndUpdateMetadata( + b -> 
b.version(randomFrom(currentState.metadata().version() - 1, currentState.metadata().version() + 1)) + ) + ); + + runVersionNumberProtectionTest( + currentState -> ClusterState.builder(currentState) + .routingTable( + RoutingTable.builder(currentState.routingTable()) + .version(randomFrom(currentState.routingTable().version() - 1, currentState.routingTable().version() + 1)) + .build() + ) + .build() + ); + } + + private void runVersionNumberProtectionTest(UnaryOperator updateOperator) { + final var deterministicTaskQueue = new DeterministicTaskQueue(); + final var threadPool = deterministicTaskQueue.getThreadPool(); + final var threadContext = threadPool.getThreadContext(); + final var failureCaught = new AtomicBoolean(); + + try ( + var masterService = createMasterService(true, null, threadPool, deterministicTaskQueue.getPrioritizedEsThreadPoolExecutor()); + var ignored = threadContext.stashContext() + ) { + final var taskId = randomIdentifier(); + + masterService.submitUnbatchedStateUpdateTask(taskId, new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + return updateOperator.apply(currentState); + } + + @Override + public void onFailure(Exception e) { + assertThat( + asInstanceOf(IllegalStateException.class, e).getMessage(), + allOf(startsWith("cluster state update executor did not preserve version numbers"), containsString(taskId)) + ); + assertTrue(failureCaught.compareAndSet(false, true)); + } + }); + + // suppress assertion errors to check production behaviour + threadContext.putTransient(MasterService.TEST_ONLY_EXECUTOR_MAY_CHANGE_VERSION_NUMBER_TRANSIENT_NAME, new Object()); + threadContext.markAsSystemContext(); + deterministicTaskQueue.runAllRunnableTasks(); + assertFalse(deterministicTaskQueue.hasRunnableTasks()); + assertFalse(deterministicTaskQueue.hasDeferredTasks()); + + assertTrue(failureCaught.get()); + } + } + /** * Returns the cluster state that the master service uses (and that is provided by the discovery layer) */ diff --git a/server/src/test/java/org/elasticsearch/gateway/GatewayServiceTests.java b/server/src/test/java/org/elasticsearch/gateway/GatewayServiceTests.java index 2136b154480ff..0524412cff70b 100644 --- a/server/src/test/java/org/elasticsearch/gateway/GatewayServiceTests.java +++ b/server/src/test/java/org/elasticsearch/gateway/GatewayServiceTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RerouteService; +import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRoutingRoleStrategy; import org.elasticsearch.cluster.service.ClusterApplierService; import org.elasticsearch.cluster.service.ClusterService; @@ -494,12 +495,22 @@ public void onFailure(Exception e) { private MasterServiceTaskQueue createSetClusterStateTaskQueue(ClusterService clusterService) { return clusterService.createTaskQueue("set-cluster-state", Priority.NORMAL, batchExecutionContext -> { - ClusterState targetState = batchExecutionContext.initialState(); + final var initialState = batchExecutionContext.initialState(); + var targetState = initialState; for (var taskContext : batchExecutionContext.taskContexts()) { targetState = taskContext.getTask().clusterState(); taskContext.success(() -> {}); } - return targetState; + // fix up the version numbers + final var finalStateBuilder = ClusterState.builder(targetState) + .version(initialState.version()) + 
.metadata(Metadata.builder(targetState.metadata()).version(initialState.metadata().version())); + if (initialState.clusterRecovered() || targetState.clusterRecovered() == false) { + finalStateBuilder.routingTable( + RoutingTable.builder(targetState.routingTable()).version(initialState.routingTable().version()) + ); + } + return finalStateBuilder.build(); }); } } diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportMigrateToDataTiersAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportMigrateToDataTiersAction.java index 8cc14a42eb5f3..472b9bdd0b800 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportMigrateToDataTiersAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportMigrateToDataTiersAction.java @@ -123,9 +123,9 @@ protected void masterOperation( final SetOnce migratedEntities = new SetOnce<>(); submitUnbatchedTask("migrate-to-data-tiers []", new ClusterStateUpdateTask(Priority.HIGH) { @Override - public ClusterState execute(ClusterState currentState) throws Exception { + public ClusterState execute(ClusterState currentState) { Tuple migratedEntitiesTuple = migrateToDataTiersRouting( - state, + currentState, request.getNodeAttributeName(), request.getLegacyTemplateToDelete(), xContentRegistry, From 2627e1787bd4fc69e960ac782e698fbcd6d5e6b0 Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Wed, 19 Jun 2024 16:09:14 +1000 Subject: [PATCH 18/44] AwaitsFix: https://github.com/elastic/elasticsearch/issues/109905 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 9b44732c59e33..abb1b0841f2fb 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -73,6 +73,9 @@ tests: - class: "org.elasticsearch.packaging.test.RpmPreservationTests" issue: "https://github.com/elastic/elasticsearch/issues/109898" method: "test30PreserveConfig" +- class: "org.elasticsearch.xpack.security.ScrollHelperIntegTests" + issue: "https://github.com/elastic/elasticsearch/issues/109905" + method: "testFetchAllEntities" # Examples: # From 7683beeb76018734f91dc7ae5ec4d2cd24754b7e Mon Sep 17 00:00:00 2001 From: Chris Hegarty <62058229+ChrisHegarty@users.noreply.github.com> Date: Wed, 19 Jun 2024 07:32:35 +0100 Subject: [PATCH 19/44] ES|QL Add topn support for Floats (#109870) This commit adds topn support for Float to the compute engine. 
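Sort keys are compared as raw bytes, so `encodeFloat` has to map floats onto a byte-comparable
form. For context, a sketch of the usual sortable-bits transform (an assumption about
TopNEncoder's internals, which this diff does not show; Lucene's NumericUtils uses the same
trick):

    // Identity for positive floats; flips the exponent/mantissa bits of negatives
    // so that signed-integer order matches float order. Writers then flip the
    // sign bit as well to get unsigned, byte-lexicographic order.
    int bits = Float.floatToIntBits(value);
    int sortable = bits ^ ((bits >> 31) & 0x7fffffff);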
--- .../org/elasticsearch/test/ESTestCase.java | 29 ++++ x-pack/plugin/esql/compute/build.gradle | 15 ++ .../operator/topn/KeyExtractorForFloat.java | 156 ++++++++++++++++++ .../operator/topn/ResultBuilderForFloat.java | 76 +++++++++ .../operator/topn/ValueExtractorForFloat.java | 84 ++++++++++ .../topn/DefaultUnsortableTopNEncoder.java | 19 +++ .../compute/operator/topn/KeyExtractor.java | 2 + .../compute/operator/topn/ResultBuilder.java | 1 + .../operator/topn/SortableTopNEncoder.java | 18 ++ .../compute/operator/topn/TopNEncoder.java | 4 + .../compute/operator/topn/ValueExtractor.java | 2 + .../operator/topn/TopNOperatorTests.java | 21 ++- 12 files changed, 424 insertions(+), 3 deletions(-) create mode 100644 x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/KeyExtractorForFloat.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForFloat.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ValueExtractorForFloat.java diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index 46d5e9441b83d..0d20c613b27a8 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -1001,6 +1001,35 @@ public static float randomFloat() { return random().nextFloat(); } + /** + * Returns a float value in the interval [start, end) if lowerInclusive is + * set to true, (start, end) otherwise. + * + * @param start lower bound of interval to draw uniformly distributed random numbers from + * @param end upper bound + * @param lowerInclusive whether or not to include lower end of the interval + */ + public static float randomFloatBetween(float start, float end, boolean lowerInclusive) { + float result; + + if (start == -Float.MAX_VALUE || end == Float.MAX_VALUE) { + // formula below does not work with very large floats + result = Float.intBitsToFloat(randomInt()); + while (result < start || result > end || Double.isNaN(result)) { + result = Float.intBitsToFloat(randomInt()); + } + } else { + result = randomFloat(); + if (lowerInclusive == false) { + while (result <= 0.0f) { + result = randomFloat(); + } + } + result = result * end + (1.0f - result) * start; + } + return result; + } + public static double randomDouble() { return random().nextDouble(); } diff --git a/x-pack/plugin/esql/compute/build.gradle b/x-pack/plugin/esql/compute/build.gradle index 4e59ec7663bdf..635a53d1ac98a 100644 --- a/x-pack/plugin/esql/compute/build.gradle +++ b/x-pack/plugin/esql/compute/build.gradle @@ -563,6 +563,11 @@ tasks.named('stringTemplates').configure { it.inputFile = keyExtractorInputFile it.outputFile = "org/elasticsearch/compute/operator/topn/KeyExtractorForLong.java" } + template { + it.properties = floatProperties + it.inputFile = keyExtractorInputFile + it.outputFile = "org/elasticsearch/compute/operator/topn/KeyExtractorForFloat.java" + } template { it.properties = doubleProperties it.inputFile = keyExtractorInputFile @@ -589,6 +594,11 @@ tasks.named('stringTemplates').configure { it.inputFile = valueExtractorInputFile it.outputFile = "org/elasticsearch/compute/operator/topn/ValueExtractorForLong.java" } + template { + it.properties = floatProperties + it.inputFile = valueExtractorInputFile + it.outputFile = 
"org/elasticsearch/compute/operator/topn/ValueExtractorForFloat.java" + } template { it.properties = doubleProperties it.inputFile = valueExtractorInputFile @@ -620,4 +630,9 @@ tasks.named('stringTemplates').configure { it.inputFile = resultBuilderInputFile it.outputFile = "org/elasticsearch/compute/operator/topn/ResultBuilderForDouble.java" } + template { + it.properties = floatProperties + it.inputFile = resultBuilderInputFile + it.outputFile = "org/elasticsearch/compute/operator/topn/ResultBuilderForFloat.java" + } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/KeyExtractorForFloat.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/KeyExtractorForFloat.java new file mode 100644 index 0000000000000..66cd1c88f5067 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/KeyExtractorForFloat.java @@ -0,0 +1,156 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.operator.topn; + +import org.elasticsearch.compute.data.FloatBlock; +import org.elasticsearch.compute.data.FloatVector; +import org.elasticsearch.compute.operator.BreakingBytesRefBuilder; + +import java.util.Locale; + +/** + * Extracts sort keys for top-n from their {@link FloatBlock}s. + * This class is generated. Edit {@code X-KeyExtractor.java.st} instead. + */ +abstract class KeyExtractorForFloat implements KeyExtractor { + static KeyExtractorForFloat extractorFor(TopNEncoder encoder, boolean ascending, byte nul, byte nonNul, FloatBlock block) { + FloatVector v = block.asVector(); + if (v != null) { + return new KeyExtractorForFloat.FromVector(encoder, nul, nonNul, v); + } + if (ascending) { + return block.mvSortedAscending() + ? new KeyExtractorForFloat.MinFromAscendingBlock(encoder, nul, nonNul, block) + : new KeyExtractorForFloat.MinFromUnorderedBlock(encoder, nul, nonNul, block); + } + return block.mvSortedAscending() + ? 
new KeyExtractorForFloat.MaxFromAscendingBlock(encoder, nul, nonNul, block) + : new KeyExtractorForFloat.MaxFromUnorderedBlock(encoder, nul, nonNul, block); + } + + private final byte nul; + private final byte nonNul; + + KeyExtractorForFloat(TopNEncoder encoder, byte nul, byte nonNul) { + assert encoder == TopNEncoder.DEFAULT_SORTABLE; + this.nul = nul; + this.nonNul = nonNul; + } + + protected final int nonNul(BreakingBytesRefBuilder key, float value) { + key.append(nonNul); + TopNEncoder.DEFAULT_SORTABLE.encodeFloat(value, key); + return Float.BYTES + 1; + } + + protected final int nul(BreakingBytesRefBuilder key) { + key.append(nul); + return 1; + } + + @Override + public final String toString() { + return String.format(Locale.ROOT, "KeyExtractorForFloat%s(%s, %s)", getClass().getSimpleName(), nul, nonNul); + } + + static class FromVector extends KeyExtractorForFloat { + private final FloatVector vector; + + FromVector(TopNEncoder encoder, byte nul, byte nonNul, FloatVector vector) { + super(encoder, nul, nonNul); + this.vector = vector; + } + + @Override + public int writeKey(BreakingBytesRefBuilder key, int position) { + return nonNul(key, vector.getFloat(position)); + } + } + + static class MinFromAscendingBlock extends KeyExtractorForFloat { + private final FloatBlock block; + + MinFromAscendingBlock(TopNEncoder encoder, byte nul, byte nonNul, FloatBlock block) { + super(encoder, nul, nonNul); + this.block = block; + } + + @Override + public int writeKey(BreakingBytesRefBuilder key, int position) { + if (block.isNull(position)) { + return nul(key); + } + return nonNul(key, block.getFloat(block.getFirstValueIndex(position))); + } + } + + static class MaxFromAscendingBlock extends KeyExtractorForFloat { + private final FloatBlock block; + + MaxFromAscendingBlock(TopNEncoder encoder, byte nul, byte nonNul, FloatBlock block) { + super(encoder, nul, nonNul); + this.block = block; + } + + @Override + public int writeKey(BreakingBytesRefBuilder key, int position) { + if (block.isNull(position)) { + return nul(key); + } + return nonNul(key, block.getFloat(block.getFirstValueIndex(position) + block.getValueCount(position) - 1)); + } + } + + static class MinFromUnorderedBlock extends KeyExtractorForFloat { + private final FloatBlock block; + + MinFromUnorderedBlock(TopNEncoder encoder, byte nul, byte nonNul, FloatBlock block) { + super(encoder, nul, nonNul); + this.block = block; + } + + @Override + public int writeKey(BreakingBytesRefBuilder key, int position) { + int size = block.getValueCount(position); + if (size == 0) { + return nul(key); + } + int start = block.getFirstValueIndex(position); + int end = start + size; + float min = block.getFloat(start); + for (int i = start + 1; i < end; i++) { + min = Math.min(min, block.getFloat(i)); + } + return nonNul(key, min); + } + } + + static class MaxFromUnorderedBlock extends KeyExtractorForFloat { + private final FloatBlock block; + + MaxFromUnorderedBlock(TopNEncoder encoder, byte nul, byte nonNul, FloatBlock block) { + super(encoder, nul, nonNul); + this.block = block; + } + + @Override + public int writeKey(BreakingBytesRefBuilder key, int position) { + int size = block.getValueCount(position); + if (size == 0) { + return nul(key); + } + int start = block.getFirstValueIndex(position); + int end = start + size; + float max = block.getFloat(start); + for (int i = start + 1; i < end; i++) { + max = Math.max(max, block.getFloat(i)); + } + return nonNul(key, max); + } + } +} diff --git 
a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForFloat.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForFloat.java new file mode 100644 index 0000000000000..a417f1c0b77d1 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForFloat.java @@ -0,0 +1,76 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.operator.topn; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.FloatBlock; + +/** + * Builds the resulting {@link FloatBlock} for some column in a top-n. + * This class is generated. Edit {@code X-ResultBuilder.java.st} instead. + */ +class ResultBuilderForFloat implements ResultBuilder { + private final FloatBlock.Builder builder; + + private final boolean inKey; + + /** + * The value previously set by {@link #decodeKey}. + */ + private float key; + + ResultBuilderForFloat(BlockFactory blockFactory, TopNEncoder encoder, boolean inKey, int initialSize) { + assert encoder == TopNEncoder.DEFAULT_UNSORTABLE : encoder.toString(); + this.inKey = inKey; + this.builder = blockFactory.newFloatBlockBuilder(initialSize); + } + + @Override + public void decodeKey(BytesRef keys) { + assert inKey; + key = TopNEncoder.DEFAULT_SORTABLE.decodeFloat(keys); + } + + @Override + public void decodeValue(BytesRef values) { + int count = TopNEncoder.DEFAULT_UNSORTABLE.decodeVInt(values); + switch (count) { + case 0 -> { + builder.appendNull(); + } + case 1 -> builder.appendFloat(inKey ? key : readValueFromValues(values)); + default -> { + builder.beginPositionEntry(); + for (int i = 0; i < count; i++) { + builder.appendFloat(readValueFromValues(values)); + } + builder.endPositionEntry(); + } + } + } + + private float readValueFromValues(BytesRef values) { + return TopNEncoder.DEFAULT_UNSORTABLE.decodeFloat(values); + } + + @Override + public FloatBlock build() { + return builder.build(); + } + + @Override + public String toString() { + return "ResultBuilderForFloat[inKey=" + inKey + "]"; + } + + @Override + public void close() { + builder.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ValueExtractorForFloat.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ValueExtractorForFloat.java new file mode 100644 index 0000000000000..295ef755a2225 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ValueExtractorForFloat.java @@ -0,0 +1,84 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.operator.topn; + +import org.elasticsearch.compute.data.FloatBlock; +import org.elasticsearch.compute.data.FloatVector; +import org.elasticsearch.compute.operator.BreakingBytesRefBuilder; + +/** + * Extracts non-sort-key values for top-n from their {@link FloatBlock}s. + * This class is generated. 
Edit {@code X-ValueExtractor.java.st} instead. + */ +abstract class ValueExtractorForFloat implements ValueExtractor { + static ValueExtractorForFloat extractorFor(TopNEncoder encoder, boolean inKey, FloatBlock block) { + FloatVector vector = block.asVector(); + if (vector != null) { + return new ValueExtractorForFloat.ForVector(encoder, inKey, vector); + } + return new ValueExtractorForFloat.ForBlock(encoder, inKey, block); + } + + protected final boolean inKey; + + ValueExtractorForFloat(TopNEncoder encoder, boolean inKey) { + assert encoder == TopNEncoder.DEFAULT_UNSORTABLE : encoder.toString(); + this.inKey = inKey; + } + + protected final void writeCount(BreakingBytesRefBuilder values, int count) { + TopNEncoder.DEFAULT_UNSORTABLE.encodeVInt(count, values); + } + + protected final void actualWriteValue(BreakingBytesRefBuilder values, float value) { + TopNEncoder.DEFAULT_UNSORTABLE.encodeFloat(value, values); + } + + static class ForVector extends ValueExtractorForFloat { + private final FloatVector vector; + + ForVector(TopNEncoder encoder, boolean inKey, FloatVector vector) { + super(encoder, inKey); + this.vector = vector; + } + + @Override + public void writeValue(BreakingBytesRefBuilder values, int position) { + writeCount(values, 1); + if (inKey) { + // will read results from the key + return; + } + actualWriteValue(values, vector.getFloat(position)); + } + } + + static class ForBlock extends ValueExtractorForFloat { + private final FloatBlock block; + + ForBlock(TopNEncoder encoder, boolean inKey, FloatBlock block) { + super(encoder, inKey); + this.block = block; + } + + @Override + public void writeValue(BreakingBytesRefBuilder values, int position) { + int size = block.getValueCount(position); + writeCount(values, size); + if (size == 1 && inKey) { + // Will read results from the key + return; + } + int start = block.getFirstValueIndex(position); + int end = start + size; + for (int i = start; i < end; i++) { + actualWriteValue(values, block.getFloat(i)); + } + } + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/DefaultUnsortableTopNEncoder.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/DefaultUnsortableTopNEncoder.java index 6529b79f95295..f1ae4cab8a4bd 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/DefaultUnsortableTopNEncoder.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/DefaultUnsortableTopNEncoder.java @@ -21,6 +21,7 @@ final class DefaultUnsortableTopNEncoder implements TopNEncoder { public static final VarHandle LONG = MethodHandles.byteArrayViewVarHandle(long[].class, ByteOrder.nativeOrder()); public static final VarHandle INT = MethodHandles.byteArrayViewVarHandle(int[].class, ByteOrder.nativeOrder()); + public static final VarHandle FLOAT = MethodHandles.byteArrayViewVarHandle(float[].class, ByteOrder.nativeOrder()); public static final VarHandle DOUBLE = MethodHandles.byteArrayViewVarHandle(double[].class, ByteOrder.nativeOrder()); @Override @@ -120,6 +121,24 @@ public int decodeInt(BytesRef bytes) { return v; } + @Override + public void encodeFloat(float value, BreakingBytesRefBuilder bytesRefBuilder) { + bytesRefBuilder.grow(bytesRefBuilder.length() + Float.BYTES); + FLOAT.set(bytesRefBuilder.bytes(), bytesRefBuilder.length(), value); + bytesRefBuilder.setLength(bytesRefBuilder.length() + Float.BYTES); + } + + @Override + public float decodeFloat(BytesRef bytes) { + if (bytes.length < 
Float.BYTES) { + throw new IllegalArgumentException("not enough bytes"); + } + float v = (float) FLOAT.get(bytes.bytes, bytes.offset); + bytes.offset += Float.BYTES; + bytes.length -= Float.BYTES; + return v; + } + @Override public void encodeDouble(double value, BreakingBytesRefBuilder bytesRefBuilder) { bytesRefBuilder.grow(bytesRefBuilder.length() + Double.BYTES); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/KeyExtractor.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/KeyExtractor.java index 0d7d4d476d7b6..b59a3b91e5f39 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/KeyExtractor.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/KeyExtractor.java @@ -12,6 +12,7 @@ import org.elasticsearch.compute.data.BytesRefBlock; import org.elasticsearch.compute.data.DoubleBlock; import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.FloatBlock; import org.elasticsearch.compute.data.IntBlock; import org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.compute.operator.BreakingBytesRefBuilder; @@ -31,6 +32,7 @@ static KeyExtractor extractorFor(ElementType elementType, TopNEncoder encoder, b case BYTES_REF -> KeyExtractorForBytesRef.extractorFor(encoder, ascending, nul, nonNul, (BytesRefBlock) block); case INT -> KeyExtractorForInt.extractorFor(encoder, ascending, nul, nonNul, (IntBlock) block); case LONG -> KeyExtractorForLong.extractorFor(encoder, ascending, nul, nonNul, (LongBlock) block); + case FLOAT -> KeyExtractorForFloat.extractorFor(encoder, ascending, nul, nonNul, (FloatBlock) block); case DOUBLE -> KeyExtractorForDouble.extractorFor(encoder, ascending, nul, nonNul, (DoubleBlock) block); case NULL -> new KeyExtractorForNull(nul); default -> { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/ResultBuilder.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/ResultBuilder.java index bd2027cade78f..61c49bac7505d 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/ResultBuilder.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/ResultBuilder.java @@ -50,6 +50,7 @@ static ResultBuilder resultBuilderFor( case BYTES_REF -> new ResultBuilderForBytesRef(blockFactory, encoder, inKey, positions); case INT -> new ResultBuilderForInt(blockFactory, encoder, inKey, positions); case LONG -> new ResultBuilderForLong(blockFactory, encoder, inKey, positions); + case FLOAT -> new ResultBuilderForFloat(blockFactory, encoder, inKey, positions); case DOUBLE -> new ResultBuilderForDouble(blockFactory, encoder, inKey, positions); case NULL -> new ResultBuilderForNull(blockFactory); case DOC -> new ResultBuilderForDoc(blockFactory, positions); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/SortableTopNEncoder.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/SortableTopNEncoder.java index d04064e0a6777..6ba653c3adedf 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/SortableTopNEncoder.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/SortableTopNEncoder.java @@ -51,6 +51,24 @@ public final int decodeInt(BytesRef bytes) { return v; } + @Override + public final void 
encodeFloat(float value, BreakingBytesRefBuilder bytesRefBuilder) { + bytesRefBuilder.grow(bytesRefBuilder.length() + Integer.BYTES); + NumericUtils.intToSortableBytes(NumericUtils.floatToSortableInt(value), bytesRefBuilder.bytes(), bytesRefBuilder.length()); + bytesRefBuilder.setLength(bytesRefBuilder.length() + Integer.BYTES); + } + + @Override + public final float decodeFloat(BytesRef bytes) { + if (bytes.length < Float.BYTES) { + throw new IllegalArgumentException("not enough bytes"); + } + float v = NumericUtils.sortableIntToFloat(NumericUtils.sortableBytesToInt(bytes.bytes, bytes.offset)); + bytes.offset += Float.BYTES; + bytes.length -= Float.BYTES; + return v; + } + @Override public final void encodeDouble(double value, BreakingBytesRefBuilder bytesRefBuilder) { bytesRefBuilder.grow(bytesRefBuilder.length() + Long.BYTES); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/TopNEncoder.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/TopNEncoder.java index f1fb7cb7736c5..737a602543db7 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/TopNEncoder.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/TopNEncoder.java @@ -54,6 +54,10 @@ public interface TopNEncoder { int decodeInt(BytesRef bytes); + void encodeFloat(float value, BreakingBytesRefBuilder bytesRefBuilder); + + float decodeFloat(BytesRef bytes); + void encodeDouble(double value, BreakingBytesRefBuilder bytesRefBuilder); double decodeDouble(BytesRef bytes); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/ValueExtractor.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/ValueExtractor.java index af870dd336a74..b9336024eb404 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/ValueExtractor.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/ValueExtractor.java @@ -13,6 +13,7 @@ import org.elasticsearch.compute.data.DocBlock; import org.elasticsearch.compute.data.DoubleBlock; import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.FloatBlock; import org.elasticsearch.compute.data.IntBlock; import org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.compute.operator.BreakingBytesRefBuilder; @@ -32,6 +33,7 @@ static ValueExtractor extractorFor(ElementType elementType, TopNEncoder encoder, case BYTES_REF -> ValueExtractorForBytesRef.extractorFor(encoder, inKey, (BytesRefBlock) block); case INT -> ValueExtractorForInt.extractorFor(encoder, inKey, (IntBlock) block); case LONG -> ValueExtractorForLong.extractorFor(encoder, inKey, (LongBlock) block); + case FLOAT -> ValueExtractorForFloat.extractorFor(encoder, inKey, (FloatBlock) block); case DOUBLE -> ValueExtractorForDouble.extractorFor(encoder, inKey, (DoubleBlock) block); case NULL -> new ValueExtractorForNull(); case DOC -> new ValueExtractorForDoc(encoder, ((DocBlock) block).asVector()); diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/topn/TopNOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/topn/TopNOperatorTests.java index a8bf04e0846e2..be598f100563d 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/topn/TopNOperatorTests.java +++ 
b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/topn/TopNOperatorTests.java @@ -331,6 +331,21 @@ public void testCompareLongs() { ); } + public void testCompareFloats() { + BlockFactory blockFactory = blockFactory(); + testCompare( + new Page( + blockFactory.newFloatBlockBuilder(2).appendFloat(-Float.MAX_VALUE).appendFloat(randomFloatBetween(-1000, -1, true)).build(), + blockFactory.newFloatBlockBuilder(2).appendFloat(randomFloatBetween(-1000, -1, true)).appendFloat(0.0f).build(), + blockFactory.newFloatBlockBuilder(2).appendFloat(0).appendFloat(randomFloatBetween(1, 1000, true)).build(), + blockFactory.newFloatBlockBuilder(2).appendFloat(randomFloatBetween(1, 1000, true)).appendFloat(Float.MAX_VALUE).build(), + blockFactory.newFloatBlockBuilder(2).appendFloat(0.0f).appendFloat(Float.MAX_VALUE).build() + ), + FLOAT, + DEFAULT_SORTABLE + ); + } + public void testCompareDoubles() { BlockFactory blockFactory = blockFactory(); testCompare( @@ -505,7 +520,7 @@ public void testCollectAllValues() { encoders.add(DEFAULT_SORTABLE); for (ElementType e : ElementType.values()) { - if (e == ElementType.UNKNOWN || e == COMPOSITE || e == FLOAT) { + if (e == ElementType.UNKNOWN || e == COMPOSITE) { continue; } elementTypes.add(e); @@ -577,7 +592,7 @@ public void testCollectAllValues_RandomMultiValues() { for (int type = 0; type < blocksCount; type++) { ElementType e = randomFrom(ElementType.values()); - if (e == ElementType.UNKNOWN || e == COMPOSITE || e == FLOAT) { + if (e == ElementType.UNKNOWN || e == COMPOSITE) { continue; } elementTypes.add(e); @@ -965,7 +980,7 @@ public void testRandomMultiValuesTopN() { for (int type = 0; type < blocksCount; type++) { ElementType e = randomValueOtherThanMany( - t -> t == ElementType.UNKNOWN || t == ElementType.DOC || t == COMPOSITE || t == FLOAT, + t -> t == ElementType.UNKNOWN || t == ElementType.DOC || t == COMPOSITE, () -> randomFrom(ElementType.values()) ); elementTypes.add(e); From 4009e65fcbe4534262fc94f41cd3393fa6a7de66 Mon Sep 17 00:00:00 2001 From: Rene Groeschke Date: Wed, 19 Jun 2024 10:19:27 +0200 Subject: [PATCH 20/44] Fix Gradle cacheability in ThirdPartyAuditTask (#109823) Makes the forbiddenAPIsClasspath property cacheable, as we have some custom manifest data in our jars that is never the same across builds. 
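For context: with @PathSensitive, input files are fingerprinted by normalized path plus their full content, so volatile manifest entries always change the build-cache key. @Classpath instead applies Gradle's classpath normalization, which ignores file paths and jar entry timestamps and also honors any normalization.runtimeClasspath rules configured by the build (for example, ignored manifest attributes). A minimal sketch of the resulting property shape; the task class and property names below are illustrative, not the real ThirdPartyAuditTask:

    import org.gradle.api.DefaultTask;
    import org.gradle.api.file.ConfigurableFileCollection;
    import org.gradle.api.tasks.Classpath;

    public abstract class AuditClasspathSketchTask extends DefaultTask {
        // @Classpath tells Gradle to fingerprint these files with classpath
        // normalization when computing the build-cache key, so jars that
        // differ only in normalized-away details can still be cache hits.
        @Classpath
        public abstract ConfigurableFileCollection getAuditedClasspath();
    }
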
We might want to reconsider whether all those custom manifest attributes are really necessary --- .../gradle/internal/precommit/ThirdPartyAuditTask.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTask.java index 58b967d0a7722..4263ef2b1f76f 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTask.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTask.java @@ -119,8 +119,8 @@ public Property getTargetCompatibility() { return targetCompatibility; } + @Classpath @InputFiles - @PathSensitive(PathSensitivity.NAME_ONLY) public abstract ConfigurableFileCollection getForbiddenAPIsClasspath(); @InputFile From b60d77e0746df3e9e198ca97fc6bfb327c261138 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Wed, 19 Jun 2024 09:26:08 +0100 Subject: [PATCH 21/44] [ML] Missing chunkers for AzureOpenAIService, AzureAIStudioService and HuggingFace embeddings (#109875) Adds chunking for AzureOpenAIService, AzureAIStudioService and the HuggingFace text embedding service --- .../common/EmbeddingRequestChunker.java | 1 - .../azureaistudio/AzureAiStudioService.java | 32 ++--- .../azureopenai/AzureOpenAiService.java | 19 ++- .../huggingface/HuggingFaceBaseService.java | 56 +------- .../huggingface/HuggingFaceService.java | 37 ++++++ .../elser/HuggingFaceElserService.java | 56 ++++++++ .../services/mistral/MistralService.java | 18 --- .../services/openai/OpenAiServiceFields.java | 2 +- .../AzureAiStudioServiceTests.java | 46 ++++--- .../azureopenai/AzureOpenAiServiceTests.java | 47 ++++--- .../HuggingFaceBaseServiceTests.java | 2 +- .../HuggingFaceElserServiceTests.java | 125 ++++++++++++++++++ .../huggingface/HuggingFaceServiceTests.java | 34 +++-- 13 files changed, 322 insertions(+), 153 deletions(-) create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceElserServiceTests.java diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/common/EmbeddingRequestChunker.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/common/EmbeddingRequestChunker.java index aa76912e4ece4..01a345909c6b1 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/common/EmbeddingRequestChunker.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/common/EmbeddingRequestChunker.java @@ -191,7 +191,6 @@ public void onResponse(InferenceServiceResults inferenceServiceResults) { case FLOAT -> handleFloatResults(inferenceServiceResults); case BYTE -> handleByteResults(inferenceServiceResults); } - ; } private void handleFloatResults(InferenceServiceResults inferenceServiceResults) { diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioService.java index 214c652a97545..65c3db4093249 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioService.java @@ -24,10 +24,7 @@ 
import org.elasticsearch.inference.SimilarityMeasure; import org.elasticsearch.inference.TaskType; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.xpack.core.inference.results.ErrorChunkedInferenceResults; -import org.elasticsearch.xpack.core.inference.results.InferenceChunkedTextEmbeddingFloatResults; -import org.elasticsearch.xpack.core.inference.results.InferenceTextEmbeddingFloatResults; -import org.elasticsearch.xpack.core.ml.inference.results.ErrorInferenceResults; +import org.elasticsearch.xpack.inference.common.EmbeddingRequestChunker; import org.elasticsearch.xpack.inference.external.action.azureaistudio.AzureAiStudioActionCreator; import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; @@ -44,7 +41,6 @@ import java.util.Map; import java.util.Set; -import static org.elasticsearch.xpack.core.inference.results.ResultUtils.createInvalidChunkedResultException; import static org.elasticsearch.xpack.inference.services.ServiceUtils.createInvalidModelException; import static org.elasticsearch.xpack.inference.services.ServiceUtils.parsePersistedConfigErrorMsg; import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrDefaultEmpty; @@ -53,6 +49,7 @@ import static org.elasticsearch.xpack.inference.services.azureaistudio.AzureAiStudioProviderCapabilities.providerAllowsEndpointTypeForTask; import static org.elasticsearch.xpack.inference.services.azureaistudio.AzureAiStudioProviderCapabilities.providerAllowsTaskType; import static org.elasticsearch.xpack.inference.services.azureaistudio.completion.AzureAiStudioChatCompletionTaskSettings.DEFAULT_MAX_NEW_TOKENS; +import static org.elasticsearch.xpack.inference.services.openai.OpenAiServiceFields.EMBEDDING_MAX_BATCH_SIZE; public class AzureAiStudioService extends SenderService { @@ -105,23 +102,16 @@ protected void doChunkedInfer( TimeValue timeout, ActionListener> listener ) { - ActionListener inferListener = listener.delegateFailureAndWrap( - (delegate, response) -> delegate.onResponse(translateToChunkedResults(input, response)) - ); - - doInfer(model, input, taskSettings, inputType, timeout, inferListener); - } - - private static List translateToChunkedResults( - List inputs, - InferenceServiceResults inferenceResults - ) { - if (inferenceResults instanceof InferenceTextEmbeddingFloatResults textEmbeddingResults) { - return InferenceChunkedTextEmbeddingFloatResults.listOf(inputs, textEmbeddingResults); - } else if (inferenceResults instanceof ErrorInferenceResults error) { - return List.of(new ErrorChunkedInferenceResults(error.getException())); + if (model instanceof AzureAiStudioModel baseAzureAiStudioModel) { + var actionCreator = new AzureAiStudioActionCreator(getSender(), getServiceComponents()); + var batchedRequests = new EmbeddingRequestChunker(input, EMBEDDING_MAX_BATCH_SIZE, EmbeddingRequestChunker.EmbeddingType.FLOAT) + .batchRequestsWithListeners(listener); + for (var request : batchedRequests) { + var action = baseAzureAiStudioModel.accept(actionCreator, taskSettings); + action.execute(new DocumentsOnlyInput(request.batch().inputs()), timeout, request.listener()); + } } else { - throw createInvalidChunkedResultException(InferenceTextEmbeddingFloatResults.NAME, inferenceResults.getWriteableName()); + listener.onFailure(createInvalidModelException(model)); } } diff --git 
a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiService.java index bd52bdb165148..5c25ae62517dd 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiService.java @@ -28,6 +28,7 @@ import org.elasticsearch.xpack.core.inference.results.InferenceChunkedTextEmbeddingFloatResults; import org.elasticsearch.xpack.core.inference.results.InferenceTextEmbeddingFloatResults; import org.elasticsearch.xpack.core.ml.inference.results.ErrorInferenceResults; +import org.elasticsearch.xpack.inference.common.EmbeddingRequestChunker; import org.elasticsearch.xpack.inference.external.action.azureopenai.AzureOpenAiActionCreator; import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; @@ -49,6 +50,7 @@ import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrDefaultEmpty; import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrThrowIfNull; import static org.elasticsearch.xpack.inference.services.ServiceUtils.throwIfNotEmptyMap; +import static org.elasticsearch.xpack.inference.services.openai.OpenAiServiceFields.EMBEDDING_MAX_BATCH_SIZE; public class AzureOpenAiService extends SenderService { public static final String NAME = "azureopenai"; @@ -230,11 +232,18 @@ protected void doChunkedInfer( TimeValue timeout, ActionListener> listener ) { - ActionListener inferListener = listener.delegateFailureAndWrap( - (delegate, response) -> delegate.onResponse(translateToChunkedResults(input, response)) - ); - - doInfer(model, input, taskSettings, inputType, timeout, inferListener); + if (model instanceof AzureOpenAiModel == false) { + listener.onFailure(createInvalidModelException(model)); + return; + } + AzureOpenAiModel azureOpenAiModel = (AzureOpenAiModel) model; + var actionCreator = new AzureOpenAiActionCreator(getSender(), getServiceComponents()); + var batchedRequests = new EmbeddingRequestChunker(input, EMBEDDING_MAX_BATCH_SIZE, EmbeddingRequestChunker.EmbeddingType.FLOAT) + .batchRequestsWithListeners(listener); + for (var request : batchedRequests) { + var action = azureOpenAiModel.accept(actionCreator, taskSettings); + action.execute(new DocumentsOnlyInput(request.batch().inputs()), timeout, request.listener()); + } } private static List translateToChunkedResults( diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceBaseService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceBaseService.java index 78307ab280cb6..27947f499fa18 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceBaseService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceBaseService.java @@ -8,23 +8,13 @@ package org.elasticsearch.xpack.inference.services.huggingface; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.common.Strings; -import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; -import 
org.elasticsearch.inference.ChunkedInferenceServiceResults; -import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ModelSecrets; import org.elasticsearch.inference.TaskType; -import org.elasticsearch.xpack.core.inference.results.ErrorChunkedInferenceResults; -import org.elasticsearch.xpack.core.inference.results.InferenceChunkedSparseEmbeddingResults; -import org.elasticsearch.xpack.core.inference.results.InferenceChunkedTextEmbeddingFloatResults; -import org.elasticsearch.xpack.core.inference.results.InferenceTextEmbeddingFloatResults; -import org.elasticsearch.xpack.core.inference.results.SparseEmbeddingResults; -import org.elasticsearch.xpack.core.ml.inference.results.ErrorInferenceResults; import org.elasticsearch.xpack.inference.external.action.huggingface.HuggingFaceActionCreator; import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; @@ -36,7 +26,6 @@ import java.util.Map; import java.util.Set; -import static org.elasticsearch.xpack.core.inference.results.ResultUtils.createInvalidChunkedResultException; import static org.elasticsearch.xpack.inference.services.ServiceUtils.createInvalidModelException; import static org.elasticsearch.xpack.inference.services.ServiceUtils.parsePersistedConfigErrorMsg; import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrThrowIfNull; @@ -44,6 +33,13 @@ public abstract class HuggingFaceBaseService extends SenderService { + /** + * The optimal batch size depends on the hardware the model is deployed on. 
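+ * (Illustrative arithmetic: with the max batch size of 20 set below, a request of 45 inputs is chunked into three batches of 20, 20 and 5.)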
+ * For HuggingFace use a conservatively small max batch size as it is + * unknown how the model is deployed + */ + static final int EMBEDDING_MAX_BATCH_SIZE = 20; + public HuggingFaceBaseService(HttpRequestSender.Factory factory, ServiceComponents serviceComponents) { super(factory, serviceComponents); } @@ -153,42 +149,4 @@ protected void doInfer( ) { throw new UnsupportedOperationException("Hugging Face service does not support inference with query input"); } - - @Override - protected void doChunkedInfer( - Model model, - @Nullable String query, - List input, - Map taskSettings, - InputType inputType, - ChunkingOptions chunkingOptions, - TimeValue timeout, - ActionListener> listener - ) { - ActionListener inferListener = listener.delegateFailureAndWrap( - (delegate, response) -> delegate.onResponse(translateToChunkedResults(input, response)) - ); - - doInfer(model, input, taskSettings, inputType, timeout, inferListener); - } - - private static List translateToChunkedResults( - List inputs, - InferenceServiceResults inferenceResults - ) { - if (inferenceResults instanceof InferenceTextEmbeddingFloatResults textEmbeddingResults) { - return InferenceChunkedTextEmbeddingFloatResults.listOf(inputs, textEmbeddingResults); - } else if (inferenceResults instanceof SparseEmbeddingResults sparseEmbeddingResults) { - return InferenceChunkedSparseEmbeddingResults.listOf(inputs, sparseEmbeddingResults); - } else if (inferenceResults instanceof ErrorInferenceResults error) { - return List.of(new ErrorChunkedInferenceResults(error.getException())); - } else { - String expectedClasses = Strings.format( - "One of [%s,%s]", - InferenceTextEmbeddingFloatResults.class.getSimpleName(), - SparseEmbeddingResults.class.getSimpleName() - ); - throw createInvalidChunkedResultException(expectedClasses, inferenceResults.getWriteableName()); - } - } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceService.java index c0438b3759a65..161ab6c47bfeb 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceService.java @@ -12,9 +12,16 @@ import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.ChunkedInferenceServiceResults; +import org.elasticsearch.inference.ChunkingOptions; +import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.TaskType; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xpack.inference.common.EmbeddingRequestChunker; +import org.elasticsearch.xpack.inference.external.action.huggingface.HuggingFaceActionCreator; +import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; import org.elasticsearch.xpack.inference.services.ServiceComponents; @@ -22,8 +29,11 @@ import org.elasticsearch.xpack.inference.services.huggingface.elser.HuggingFaceElserModel; import 
org.elasticsearch.xpack.inference.services.huggingface.embeddings.HuggingFaceEmbeddingsModel; +import java.util.List; import java.util.Map; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.createInvalidModelException; + public class HuggingFaceService extends HuggingFaceBaseService { public static final String NAME = "hugging_face"; @@ -79,6 +89,33 @@ private static HuggingFaceEmbeddingsModel updateModelWithEmbeddingDetails(Huggin return new HuggingFaceEmbeddingsModel(model, serviceSettings); } + @Override + protected void doChunkedInfer( + Model model, + @Nullable String query, + List input, + Map taskSettings, + InputType inputType, + ChunkingOptions chunkingOptions, + TimeValue timeout, + ActionListener> listener + ) { + if (model instanceof HuggingFaceModel == false) { + listener.onFailure(createInvalidModelException(model)); + return; + } + + var huggingFaceModel = (HuggingFaceModel) model; + var actionCreator = new HuggingFaceActionCreator(getSender(), getServiceComponents()); + + var batchedRequests = new EmbeddingRequestChunker(input, EMBEDDING_MAX_BATCH_SIZE, EmbeddingRequestChunker.EmbeddingType.FLOAT) + .batchRequestsWithListeners(listener); + for (var request : batchedRequests) { + var action = huggingFaceModel.accept(actionCreator); + action.execute(new DocumentsOnlyInput(request.batch().inputs()), timeout, request.listener()); + } + } + @Override public String name() { return NAME; diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserService.java index d3099e96ee7c1..ee35869c6a8d1 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserService.java @@ -10,17 +10,34 @@ import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.Strings; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.ChunkedInferenceServiceResults; +import org.elasticsearch.inference.ChunkingOptions; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.inference.InputType; +import org.elasticsearch.inference.Model; import org.elasticsearch.inference.TaskType; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xpack.core.inference.results.ErrorChunkedInferenceResults; +import org.elasticsearch.xpack.core.inference.results.InferenceChunkedSparseEmbeddingResults; +import org.elasticsearch.xpack.core.inference.results.InferenceChunkedTextEmbeddingFloatResults; +import org.elasticsearch.xpack.core.inference.results.InferenceTextEmbeddingFloatResults; +import org.elasticsearch.xpack.core.inference.results.SparseEmbeddingResults; +import org.elasticsearch.xpack.core.ml.inference.results.ErrorInferenceResults; import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; import org.elasticsearch.xpack.inference.services.ServiceComponents; import 
org.elasticsearch.xpack.inference.services.huggingface.HuggingFaceBaseService; import org.elasticsearch.xpack.inference.services.huggingface.HuggingFaceModel; +import java.util.List; import java.util.Map; +import static org.elasticsearch.xpack.core.inference.results.ResultUtils.createInvalidChunkedResultException; + public class HuggingFaceElserService extends HuggingFaceBaseService { public static final String NAME = "hugging_face_elser"; @@ -48,6 +65,45 @@ protected HuggingFaceModel createModel( }; } + @Override + protected void doChunkedInfer( + Model model, + @Nullable String query, + List input, + Map taskSettings, + InputType inputType, + ChunkingOptions chunkingOptions, + TimeValue timeout, + ActionListener> listener + ) { + ActionListener inferListener = listener.delegateFailureAndWrap( + (delegate, response) -> delegate.onResponse(translateToChunkedResults(input, response)) + ); + + // TODO chunking sparse embeddings not implemented + doInfer(model, input, taskSettings, inputType, timeout, inferListener); + } + + private static List translateToChunkedResults( + List inputs, + InferenceServiceResults inferenceResults + ) { + if (inferenceResults instanceof InferenceTextEmbeddingFloatResults textEmbeddingResults) { + return InferenceChunkedTextEmbeddingFloatResults.listOf(inputs, textEmbeddingResults); + } else if (inferenceResults instanceof SparseEmbeddingResults sparseEmbeddingResults) { + return InferenceChunkedSparseEmbeddingResults.listOf(inputs, sparseEmbeddingResults); + } else if (inferenceResults instanceof ErrorInferenceResults error) { + return List.of(new ErrorChunkedInferenceResults(error.getException())); + } else { + String expectedClasses = Strings.format( + "One of [%s,%s]", + InferenceTextEmbeddingFloatResults.class.getSimpleName(), + SparseEmbeddingResults.class.getSimpleName() + ); + throw createInvalidChunkedResultException(expectedClasses, inferenceResults.getWriteableName()); + } + } + @Override public TransportVersion getMinimalSupportedVersion() { return TransportVersions.V_8_12_0; diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/mistral/MistralService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/mistral/MistralService.java index 4601df6f14039..bcef31031cb0c 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/mistral/MistralService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/mistral/MistralService.java @@ -22,10 +22,6 @@ import org.elasticsearch.inference.SimilarityMeasure; import org.elasticsearch.inference.TaskType; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.xpack.core.inference.results.ErrorChunkedInferenceResults; -import org.elasticsearch.xpack.core.inference.results.InferenceChunkedTextEmbeddingFloatResults; -import org.elasticsearch.xpack.core.inference.results.InferenceTextEmbeddingFloatResults; -import org.elasticsearch.xpack.core.ml.inference.results.ErrorInferenceResults; import org.elasticsearch.xpack.inference.common.EmbeddingRequestChunker; import org.elasticsearch.xpack.inference.external.action.mistral.MistralActionCreator; import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; @@ -42,7 +38,6 @@ import java.util.Set; import static org.elasticsearch.TransportVersions.ADD_MISTRAL_EMBEDDINGS_INFERENCE; -import static org.elasticsearch.xpack.core.inference.results.ResultUtils.createInvalidChunkedResultException; import 
static org.elasticsearch.xpack.inference.services.ServiceUtils.createInvalidModelException; import static org.elasticsearch.xpack.inference.services.ServiceUtils.parsePersistedConfigErrorMsg; import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrDefaultEmpty; @@ -117,19 +112,6 @@ protected void doChunkedInfer( } } - private static List translateToChunkedResults( - List inputs, - InferenceServiceResults inferenceResults - ) { - if (inferenceResults instanceof InferenceTextEmbeddingFloatResults textEmbeddingResults) { - return InferenceChunkedTextEmbeddingFloatResults.listOf(inputs, textEmbeddingResults); - } else if (inferenceResults instanceof ErrorInferenceResults error) { - return List.of(new ErrorChunkedInferenceResults(error.getException())); - } else { - throw createInvalidChunkedResultException(InferenceChunkedTextEmbeddingFloatResults.NAME, inferenceResults.getWriteableName()); - } - } - @Override public String name() { return NAME; diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceFields.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceFields.java index ca2bc56866aa5..38b31dbe4bc22 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceFields.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceFields.java @@ -16,6 +16,6 @@ public class OpenAiServiceFields { /** * Taken from https://platform.openai.com/docs/api-reference/embeddings/create */ - static final int EMBEDDING_MAX_BATCH_SIZE = 2048; + public static final int EMBEDDING_MAX_BATCH_SIZE = 2048; } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioServiceTests.java index 18d7b6e072fe3..709cc4d3494fd 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioServiceTests.java @@ -33,7 +33,6 @@ import org.elasticsearch.xpack.core.inference.action.InferenceAction; import org.elasticsearch.xpack.core.inference.results.ChatCompletionResults; import org.elasticsearch.xpack.core.inference.results.InferenceChunkedTextEmbeddingFloatResults; -import org.elasticsearch.xpack.core.ml.inference.results.ChunkedNlpInferenceResults; import org.elasticsearch.xpack.inference.external.http.HttpClientManager; import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderTests; @@ -55,14 +54,12 @@ import org.junit.Before; import java.io.IOException; -import java.net.URISyntaxException; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.TimeUnit; -import static org.elasticsearch.xpack.core.inference.results.InferenceChunkedTextEmbeddingFloatResultsTests.asMapWithListsInsteadOfArrays; import static org.elasticsearch.xpack.inference.Utils.getInvalidModel; import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; import static org.elasticsearch.xpack.inference.Utils.mockClusterServiceEmpty; @@ -849,7 +846,7 @@ public 
void testInfer_ThrowsErrorWhenModelIsNotAzureAiStudioModel() throws IOExc verifyNoMoreInteractions(sender); } - public void testChunkedInfer_Embeddings_CallsInfer_ConvertsFloatResponse() throws IOException, URISyntaxException { + public void testChunkedInfer() throws IOException { var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); try (var service = new AzureAiStudioService(senderFactory, createWithEmptySettings(threadPool))) { @@ -865,6 +862,14 @@ public void testChunkedInfer_Embeddings_CallsInfer_ConvertsFloatResponse() throw 0.0123, -0.0123 ] + }, + { + "object": "embedding", + "index": 1, + "embedding": [ + 1.0123, + -1.0123 + ] } ], "model": "text-embedding-ada-002-v2", @@ -892,7 +897,7 @@ public void testChunkedInfer_Embeddings_CallsInfer_ConvertsFloatResponse() throw PlainActionFuture> listener = new PlainActionFuture<>(); service.chunkedInfer( model, - List.of("abc"), + List.of("foo", "bar"), new HashMap<>(), InputType.INGEST, new ChunkingOptions(null, null), @@ -900,20 +905,23 @@ public void testChunkedInfer_Embeddings_CallsInfer_ConvertsFloatResponse() throw listener ); - var result = listener.actionGet(TIMEOUT).get(0); - assertThat(result, CoreMatchers.instanceOf(InferenceChunkedTextEmbeddingFloatResults.class)); + var results = listener.actionGet(TIMEOUT); + assertThat(results, hasSize(2)); + { + assertThat(results.get(0), CoreMatchers.instanceOf(InferenceChunkedTextEmbeddingFloatResults.class)); + var floatResult = (InferenceChunkedTextEmbeddingFloatResults) results.get(0); + assertThat(floatResult.chunks(), hasSize(1)); + assertEquals("foo", floatResult.chunks().get(0).matchedText()); + assertArrayEquals(new float[] { 0.0123f, -0.0123f }, floatResult.chunks().get(0).embedding(), 0.0f); + } + { + assertThat(results.get(1), CoreMatchers.instanceOf(InferenceChunkedTextEmbeddingFloatResults.class)); + var floatResult = (InferenceChunkedTextEmbeddingFloatResults) results.get(1); + assertThat(floatResult.chunks(), hasSize(1)); + assertEquals("bar", floatResult.chunks().get(0).matchedText()); + assertArrayEquals(new float[] { 1.0123f, -1.0123f }, floatResult.chunks().get(0).embedding(), 0.0f); + } - assertThat( - asMapWithListsInsteadOfArrays((InferenceChunkedTextEmbeddingFloatResults) result), - Matchers.is( - Map.of( - InferenceChunkedTextEmbeddingFloatResults.FIELD_NAME, - List.of( - Map.of(ChunkedNlpInferenceResults.TEXT, "abc", ChunkedNlpInferenceResults.INFERENCE, List.of(0.0123f, -0.0123f)) - ) - ) - ) - ); assertThat(webServer.requests(), hasSize(1)); assertNull(webServer.requests().get(0).getUri().getQuery()); assertThat(webServer.requests().get(0).getHeader(HttpHeaders.CONTENT_TYPE), equalTo(XContentType.JSON.mediaType())); @@ -921,7 +929,7 @@ public void testChunkedInfer_Embeddings_CallsInfer_ConvertsFloatResponse() throw var requestMap = entityAsMap(webServer.requests().get(0).getBody()); assertThat(requestMap.size(), Matchers.is(2)); - assertThat(requestMap.get("input"), Matchers.is(List.of("abc"))); + assertThat(requestMap.get("input"), Matchers.is(List.of("foo", "bar"))); assertThat(requestMap.get("user"), Matchers.is("user")); } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiServiceTests.java index e59664d0e0129..de474ea1b4237 100644 --- 
a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiServiceTests.java @@ -32,7 +32,6 @@ import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.inference.action.InferenceAction; import org.elasticsearch.xpack.core.inference.results.InferenceChunkedTextEmbeddingFloatResults; -import org.elasticsearch.xpack.core.ml.inference.results.ChunkedNlpInferenceResults; import org.elasticsearch.xpack.inference.external.http.HttpClientManager; import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderTests; @@ -55,7 +54,6 @@ import java.util.Set; import java.util.concurrent.TimeUnit; -import static org.elasticsearch.xpack.core.inference.results.InferenceChunkedTextEmbeddingFloatResultsTests.asMapWithListsInsteadOfArrays; import static org.elasticsearch.xpack.inference.Utils.getInvalidModel; import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; import static org.elasticsearch.xpack.inference.Utils.mockClusterServiceEmpty; @@ -1079,8 +1077,16 @@ public void testChunkedInfer_CallsInfer_ConvertsFloatResponse() throws IOExcepti "object": "embedding", "index": 0, "embedding": [ - 0.0123, - -0.0123 + 0.123, + -0.123 + ] + }, + { + "object": "embedding", + "index": 1, + "embedding": [ + 1.123, + -1.123 ] } ], @@ -1098,7 +1104,7 @@ public void testChunkedInfer_CallsInfer_ConvertsFloatResponse() throws IOExcepti PlainActionFuture> listener = new PlainActionFuture<>(); service.chunkedInfer( model, - List.of("abc"), + List.of("foo", "bar"), new HashMap<>(), InputType.INGEST, new ChunkingOptions(null, null), @@ -1106,20 +1112,23 @@ public void testChunkedInfer_CallsInfer_ConvertsFloatResponse() throws IOExcepti listener ); - var result = listener.actionGet(TIMEOUT).get(0); - assertThat(result, CoreMatchers.instanceOf(InferenceChunkedTextEmbeddingFloatResults.class)); + var results = listener.actionGet(TIMEOUT); + assertThat(results, hasSize(2)); + { + assertThat(results.get(0), CoreMatchers.instanceOf(InferenceChunkedTextEmbeddingFloatResults.class)); + var floatResult = (InferenceChunkedTextEmbeddingFloatResults) results.get(0); + assertThat(floatResult.chunks(), hasSize(1)); + assertEquals("foo", floatResult.chunks().get(0).matchedText()); + assertArrayEquals(new float[] { 0.123f, -0.123f }, floatResult.chunks().get(0).embedding(), 0.0f); + } + { + assertThat(results.get(1), CoreMatchers.instanceOf(InferenceChunkedTextEmbeddingFloatResults.class)); + var floatResult = (InferenceChunkedTextEmbeddingFloatResults) results.get(1); + assertThat(floatResult.chunks(), hasSize(1)); + assertEquals("bar", floatResult.chunks().get(0).matchedText()); + assertArrayEquals(new float[] { 1.123f, -1.123f }, floatResult.chunks().get(0).embedding(), 0.0f); + } - assertThat( - asMapWithListsInsteadOfArrays((InferenceChunkedTextEmbeddingFloatResults) result), - Matchers.is( - Map.of( - InferenceChunkedTextEmbeddingFloatResults.FIELD_NAME, - List.of( - Map.of(ChunkedNlpInferenceResults.TEXT, "abc", ChunkedNlpInferenceResults.INFERENCE, List.of(0.0123f, -0.0123f)) - ) - ) - ) - ); assertThat(webServer.requests(), hasSize(1)); assertNull(webServer.requests().get(0).getUri().getQuery()); assertThat(webServer.requests().get(0).getHeader(HttpHeaders.CONTENT_TYPE), equalTo(XContentType.JSON.mediaType())); @@ 
-1127,7 +1136,7 @@ public void testChunkedInfer_CallsInfer_ConvertsFloatResponse() throws IOExcepti var requestMap = entityAsMap(webServer.requests().get(0).getBody()); assertThat(requestMap.size(), Matchers.is(2)); - assertThat(requestMap.get("input"), Matchers.is(List.of("abc"))); + assertThat(requestMap.get("input"), Matchers.is(List.of("foo", "bar"))); assertThat(requestMap.get("user"), Matchers.is("user")); } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceBaseServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceBaseServiceTests.java index fd7e1b48b7e03..22c3b7895460a 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceBaseServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceBaseServiceTests.java @@ -90,7 +90,7 @@ public void testInfer_ThrowsErrorWhenModelIsNotHuggingFaceModel() throws IOExcep verifyNoMoreInteractions(sender); } - private static final class TestService extends HuggingFaceBaseService { + private static final class TestService extends HuggingFaceService { TestService(HttpRequestSender.Factory factory, ServiceComponents serviceComponents) { super(factory, serviceComponents); } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceElserServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceElserServiceTests.java new file mode 100644 index 0000000000000..33ab75a543381 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceElserServiceTests.java @@ -0,0 +1,125 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.services.huggingface; + +import org.apache.http.HttpHeaders; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.ChunkedInferenceServiceResults; +import org.elasticsearch.inference.ChunkingOptions; +import org.elasticsearch.inference.InputType; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.http.MockResponse; +import org.elasticsearch.test.http.MockWebServer; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.inference.action.InferenceAction; +import org.elasticsearch.xpack.core.inference.results.InferenceChunkedSparseEmbeddingResults; +import org.elasticsearch.xpack.core.ml.inference.results.ChunkedNlpInferenceResults; +import org.elasticsearch.xpack.inference.external.http.HttpClientManager; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderTests; +import org.elasticsearch.xpack.inference.logging.ThrottlerManager; +import org.elasticsearch.xpack.inference.services.huggingface.elser.HuggingFaceElserModelTests; +import org.elasticsearch.xpack.inference.services.huggingface.elser.HuggingFaceElserService; +import org.hamcrest.MatcherAssert; +import org.hamcrest.Matchers; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; +import static org.elasticsearch.xpack.inference.Utils.mockClusterServiceEmpty; +import static org.elasticsearch.xpack.inference.external.http.Utils.entityAsMap; +import static org.elasticsearch.xpack.inference.external.http.Utils.getUrl; +import static org.elasticsearch.xpack.inference.services.ServiceComponentsTests.createWithEmptySettings; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.mockito.Mockito.mock; + +public class HuggingFaceElserServiceTests extends ESTestCase { + + private static final TimeValue TIMEOUT = new TimeValue(30, TimeUnit.SECONDS); + + private final MockWebServer webServer = new MockWebServer(); + private ThreadPool threadPool; + private HttpClientManager clientManager; + + @Before + public void init() throws Exception { + webServer.start(); + threadPool = createThreadPool(inferenceUtilityPool()); + clientManager = HttpClientManager.create(Settings.EMPTY, threadPool, mockClusterServiceEmpty(), mock(ThrottlerManager.class)); + } + + @After + public void shutdown() throws IOException { + clientManager.close(); + terminate(threadPool); + webServer.close(); + } + + public void testChunkedInfer_CallsInfer_Elser_ConvertsFloatResponse() throws IOException { + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); + + try (var service = new HuggingFaceElserService(senderFactory, createWithEmptySettings(threadPool))) { + + String responseJson = """ + [ + { + ".": 0.133155956864357 + } + ] + """; + webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson)); + + var model = HuggingFaceElserModelTests.createModel(getUrl(webServer), "secret"); + PlainActionFuture> listener = new PlainActionFuture<>(); + service.chunkedInfer( + model, + List.of("abc"), + new HashMap<>(), + InputType.INGEST, + new 
ChunkingOptions(null, null), + InferenceAction.Request.DEFAULT_TIMEOUT, + listener + ); + + var result = listener.actionGet(TIMEOUT).get(0); + + MatcherAssert.assertThat( + result.asMap(), + Matchers.is( + Map.of( + InferenceChunkedSparseEmbeddingResults.FIELD_NAME, + List.of( + Map.of(ChunkedNlpInferenceResults.TEXT, "abc", ChunkedNlpInferenceResults.INFERENCE, Map.of(".", 0.13315596f)) + ) + ) + ) + ); + + assertThat(webServer.requests(), hasSize(1)); + assertNull(webServer.requests().get(0).getUri().getQuery()); + assertThat( + webServer.requests().get(0).getHeader(HttpHeaders.CONTENT_TYPE), + equalTo(XContentType.JSON.mediaTypeWithoutParameters()) + ); + assertThat(webServer.requests().get(0).getHeader(HttpHeaders.AUTHORIZATION), equalTo("Bearer secret")); + + var requestMap = entityAsMap(webServer.requests().get(0).getBody()); + assertThat(requestMap.size(), Matchers.is(1)); + assertThat(requestMap.get("inputs"), Matchers.is(List.of("abc"))); + } + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceServiceTests.java index a36306e40f5cb..a855437ce0738 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceServiceTests.java @@ -31,7 +31,6 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.inference.action.InferenceAction; -import org.elasticsearch.xpack.core.inference.results.InferenceChunkedSparseEmbeddingResults; import org.elasticsearch.xpack.core.inference.results.InferenceChunkedTextEmbeddingFloatResults; import org.elasticsearch.xpack.core.ml.inference.results.ChunkedNlpInferenceResults; import org.elasticsearch.xpack.inference.external.http.HttpClientManager; @@ -649,21 +648,22 @@ public void testChunkedInfer_CallsInfer_TextEmbedding_ConvertsFloatResponse() th } } - public void testChunkedInfer_CallsInfer_Elser_ConvertsFloatResponse() throws IOException { + public void testChunkedInfer() throws IOException { var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); try (var service = new HuggingFaceService(senderFactory, createWithEmptySettings(threadPool))) { String responseJson = """ [ - { - ".": 0.133155956864357 - } + [ + 0.123, + -0.123 + ] ] """; webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson)); - var model = HuggingFaceElserModelTests.createModel(getUrl(webServer), "secret"); + var model = HuggingFaceEmbeddingsModelTests.createModel(getUrl(webServer), "secret"); PlainActionFuture<List<ChunkedInferenceServiceResults>> listener = new PlainActionFuture<>(); service.chunkedInfer( model, @@ -675,19 +675,15 @@ public void testChunkedInfer_CallsInfer_Elser_ConvertsFloatResponse() throws IOE listener ); - var result = listener.actionGet(TIMEOUT).get(0); - - MatcherAssert.assertThat( - result.asMap(), - Matchers.is( - Map.of( - InferenceChunkedSparseEmbeddingResults.FIELD_NAME, - List.of( - Map.of(ChunkedNlpInferenceResults.TEXT, "abc", ChunkedNlpInferenceResults.INFERENCE, Map.of(".", 0.13315596f)) - ) - ) - ) - ); + var results = listener.actionGet(TIMEOUT); + assertThat(results, hasSize(1)); + { + assertThat(results.get(0),
CoreMatchers.instanceOf(InferenceChunkedTextEmbeddingFloatResults.class)); + var floatResult = (InferenceChunkedTextEmbeddingFloatResults) results.get(0); + assertThat(floatResult.chunks(), hasSize(1)); + assertEquals("abc", floatResult.chunks().get(0).matchedText()); + assertArrayEquals(new float[] { 0.123f, -0.123f }, floatResult.chunks().get(0).embedding(), 0.0f); + } assertThat(webServer.requests(), hasSize(1)); assertNull(webServer.requests().get(0).getUri().getQuery()); From d5488ad78dd615fb46f791ae09355c8ed768c022 Mon Sep 17 00:00:00 2001 From: Chris Hegarty <62058229+ChrisHegarty@users.noreply.github.com> Date: Wed, 19 Jun 2024 09:37:15 +0100 Subject: [PATCH 22/44] ES|QL enable FloatBlock serialization (#109858) This commit enables ES|QL FloatBlock serialization. --- .../org/elasticsearch/compute/data/Block.java | 1 + .../compute/data/BasicPageTests.java | 20 ++++++++---- .../compute/data/BlockSerializationTests.java | 31 +++++++++++++++++++ 3 files changed, 46 insertions(+), 6 deletions(-) diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Block.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Block.java index ca3ce1349c47f..282bc9064b308 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Block.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Block.java @@ -276,6 +276,7 @@ static List<NamedWriteableRegistry.Entry> getNamedWriteables() { return List.of( IntBlock.ENTRY, LongBlock.ENTRY, + FloatBlock.ENTRY, DoubleBlock.ENTRY, BytesRefBlock.ENTRY, BooleanBlock.ENTRY, diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BasicPageTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BasicPageTests.java index f76ff0708120b..e0cf277e99967 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BasicPageTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BasicPageTests.java @@ -136,14 +136,15 @@ public void testEqualityAndHashCode() throws IOException { int blockCount = randomIntBetween(1, 256); Block[] blocks = new Block[blockCount]; for (int blockIndex = 0; blockIndex < blockCount; blockIndex++) { - blocks[blockIndex] = switch (randomInt(6)) { + blocks[blockIndex] = switch (randomInt(7)) { case 0 -> blockFactory.newIntArrayVector(randomInts(positions).toArray(), positions).asBlock(); case 1 -> blockFactory.newLongArrayVector(randomLongs(positions).toArray(), positions).asBlock(); - case 2 -> blockFactory.newDoubleArrayVector(randomDoubles(positions).toArray(), positions).asBlock(); - case 3 -> blockFactory.newConstantIntBlockWith(randomInt(), positions); - case 4 -> blockFactory.newConstantLongBlockWith(randomLong(), positions); - case 5 -> blockFactory.newConstantDoubleBlockWith(randomDouble(), positions); - case 6 -> blockFactory.newConstantBytesRefBlockWith(new BytesRef(Integer.toHexString(randomInt())), positions); + case 2 -> blockFactory.newFloatArrayVector(randomFloats(positions), positions).asBlock(); + case 3 -> blockFactory.newDoubleArrayVector(randomDoubles(positions).toArray(), positions).asBlock(); + case 4 -> blockFactory.newConstantIntBlockWith(randomInt(), positions); + case 5 -> blockFactory.newConstantLongBlockWith(randomLong(), positions); + case 6 -> blockFactory.newConstantDoubleBlockWith(randomDouble(), positions); + case 7 -> blockFactory.newConstantBytesRefBlockWith(new BytesRef(Integer.toHexString(randomInt())), positions); default ->
throw new AssertionError(); }; } @@ -184,6 +185,7 @@ public void testPageSerializationSimple() throws IOException { Page origPage = new Page( blockFactory.newIntArrayVector(IntStream.range(0, 10).toArray(), 10).asBlock(), blockFactory.newLongArrayVector(LongStream.range(10, 20).toArray(), 10).asBlock(), + blockFactory.newFloatArrayVector(randomFloats(10), 10).asBlock(), blockFactory.newDoubleArrayVector(LongStream.range(30, 40).mapToDouble(i -> i).toArray(), 10).asBlock(), blockFactory.newBytesRefArrayVector(bytesRefArrayOf("0a", "1b", "2c", "3d", "4e", "5f", "6g", "7h", "8i", "9j"), 10).asBlock(), blockFactory.newConstantIntBlockWith(randomInt(), 10), @@ -248,4 +250,10 @@ BytesRefArray bytesRefArrayOf(String... values) { Arrays.stream(values).map(BytesRef::new).forEach(array::append); return array; } + + float[] randomFloats(int size) { + float[] fa = new float[size]; + IntStream.range(0, size).forEach(i -> fa[i] = randomFloat()); + return fa; + } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockSerializationTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockSerializationTests.java index 2daf7755841f7..8ca02b64f01ff 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockSerializationTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockSerializationTests.java @@ -39,6 +39,10 @@ public void testConstantLongBlockLong() throws IOException { assertConstantBlockImpl(blockFactory.newConstantLongBlockWith(randomLong(), randomIntBetween(1, 8192))); } + public void testConstantFloatBlock() throws IOException { + assertConstantBlockImpl(blockFactory.newConstantFloatBlockWith(randomFloat(), randomIntBetween(1, 8192))); + } + public void testConstantDoubleBlock() throws IOException { assertConstantBlockImpl(blockFactory.newConstantDoubleBlockWith(randomDouble(), randomIntBetween(1, 8192))); } @@ -81,6 +85,17 @@ public void testEmptyLongBlock() throws IOException { } } + public void testEmptyFloatBlock() throws IOException { + assertEmptyBlock(blockFactory.newFloatBlockBuilder(0).build()); + try (FloatBlock toFilter = blockFactory.newFloatBlockBuilder(0).appendNull().build()) { + assertEmptyBlock(toFilter.filter()); + } + assertEmptyBlock(blockFactory.newFloatVectorBuilder(0).build().asBlock()); + try (FloatVector toFilter = blockFactory.newFloatVectorBuilder(0).appendFloat(randomFloat()).build()) { + assertEmptyBlock(toFilter.filter().asBlock()); + } + } + public void testEmptyDoubleBlock() throws IOException { assertEmptyBlock(blockFactory.newDoubleBlockBuilder(0).build()); try (DoubleBlock toFilter = blockFactory.newDoubleBlockBuilder(0).appendNull().build()) { @@ -140,6 +155,22 @@ public void testFilterLongBlock() throws IOException { } } + public void testFilterFloatBlock() throws IOException { + try (FloatBlock toFilter = blockFactory.newFloatBlockBuilder(0).appendFloat(1).appendFloat(2).build()) { + assertFilterBlock(toFilter.filter(1)); + } + try (FloatBlock toFilter = blockFactory.newFloatBlockBuilder(1).appendFloat(randomFloat()).appendNull().build()) { + assertFilterBlock(toFilter.filter(0)); + } + try (FloatVector toFilter = blockFactory.newFloatVectorBuilder(1).appendFloat(randomFloat()).build()) { + assertFilterBlock(toFilter.filter(0).asBlock()); + + } + try (FloatVector toFilter = blockFactory.newFloatVectorBuilder(1).appendFloat(randomFloat()).appendFloat(randomFloat()).build()) { + assertFilterBlock(toFilter.filter(0).asBlock()); 
+ } + } + public void testFilterDoubleBlock() throws IOException { try (DoubleBlock toFilter = blockFactory.newDoubleBlockBuilder(0).appendDouble(1).appendDouble(2).build()) { assertFilterBlock(toFilter.filter(1)); From 330e1defd7c5a30098fbedeaf3f099378d715296 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Wed, 19 Jun 2024 10:57:48 +0100 Subject: [PATCH 23/44] AwaitsFix: https://github.com/elastic/elasticsearch/issues/109904 --- muted-tests.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index abb1b0841f2fb..2bd400c47a7aa 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -76,6 +76,8 @@ tests: - class: "org.elasticsearch.xpack.security.ScrollHelperIntegTests" issue: "https://github.com/elastic/elasticsearch/issues/109905" method: "testFetchAllEntities" +- class: "org.elasticsearch.xpack.ml.integration.AutodetectMemoryLimitIT" + issue: "https://github.com/elastic/elasticsearch/issues/109904" # Examples: # From ba91bfdc94a70a0da11875aa7fe89b4a902375a9 Mon Sep 17 00:00:00 2001 From: Niels Bauman <33722607+nielsbauman@users.noreply.github.com> Date: Wed, 19 Jun 2024 13:18:47 +0200 Subject: [PATCH 24/44] Lazily create the failure store (#109289) Rather than initializing the failure store right away when a new data stream is created, we leave it empty and mark it for lazy rollover. This results in the failure store only being initialized (i.e. an index created) when a failure has actually occurred. The exception to the rule is when a failure occurs while the data stream is being auto-created. In that case, we do want to initialize the failure store right away. --- .../change-mappings-and-settings.asciidoc | 2 +- .../data-streams/downsampling-manual.asciidoc | 2 +- ...grate-data-stream-from-ilm-to-dsl.asciidoc | 8 +- .../indices/get-data-stream.asciidoc | 2 +- .../datastreams/DataStreamsSnapshotsIT.java | 11 + .../datastreams/FailureStoreQueryParamIT.java | 2 + .../DataStreamGetWriteIndexTests.java | 2 +- .../test/data_stream/10_basic.yml | 121 +++++++++- .../data_stream/170_modify_data_stream.yml | 138 +++++++++-- .../190_failure_store_redirection.yml | 4 +- .../200_rollover_failure_store.yml | 215 +++++++++++++++--- .../30_auto_create_data_stream.yml | 7 +- .../org/elasticsearch/TransportVersions.java | 1 + .../indices/create/AutoCreateAction.java | 3 +- .../indices/create/CreateIndexRequest.java | 25 +- .../rollover/MetadataRolloverService.java | 33 ++- .../rollover/TransportRolloverAction.java | 46 +++- .../action/bulk/TransportBulkAction.java | 51 +++-- .../bulk/TransportSimulateBulkAction.java | 3 +- .../cluster/metadata/DataStream.java | 18 +- .../metadata/IndexNameExpressionResolver.java | 16 +- .../MetadataCreateDataStreamService.java | 58 +++-- .../MetadataMigrateToDataStreamService.java | 4 +- .../MetadataRolloverServiceTests.java | 11 - ...ActionIndicesThatCannotBeCreatedTests.java | 5 +- .../bulk/TransportBulkActionIngestTests.java | 4 +- .../action/bulk/TransportBulkActionTests.java | 4 +- .../TransportSimulateBulkActionTests.java | 6 +- .../cluster/metadata/DataStreamTests.java | 27 +-- .../MetadataCreateDataStreamServiceTests.java | 76 +++++-- .../ccr/action/TransportPutFollowAction.java | 3 + .../xpack/core/DataStreamRestIT.java | 3 + 32 files changed, 702 insertions(+), 209 deletions(-) diff --git a/docs/reference/data-streams/change-mappings-and-settings.asciidoc b/docs/reference/data-streams/change-mappings-and-settings.asciidoc index c96f0c7342a96..076b315558b60 100644 --- 
a/docs/reference/data-streams/change-mappings-and-settings.asciidoc +++ b/docs/reference/data-streams/change-mappings-and-settings.asciidoc @@ -602,7 +602,7 @@ stream's oldest backing index. // TESTRESPONSE[s/"index_uuid": "_eEfRrFHS9OyhqWntkgHAQ"/"index_uuid": $body.data_streams.0.indices.1.index_uuid/] // TESTRESPONSE[s/"index_name": ".ds-my-data-stream-2099.03.07-000001"/"index_name": $body.data_streams.0.indices.0.index_name/] // TESTRESPONSE[s/"index_name": ".ds-my-data-stream-2099.03.08-000002"/"index_name": $body.data_streams.0.indices.1.index_name/] -// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW","failure_store":{"enabled": false, "indices": [], "rollover_on_write": false}/] +// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW","failure_store":{"enabled": false, "indices": [], "rollover_on_write": true}/] <1> First item in the `indices` array for `my-data-stream`. This item contains information about the stream's oldest backing index, diff --git a/docs/reference/data-streams/downsampling-manual.asciidoc b/docs/reference/data-streams/downsampling-manual.asciidoc index 8f6b39d2aa0dd..771a08d97d949 100644 --- a/docs/reference/data-streams/downsampling-manual.asciidoc +++ b/docs/reference/data-streams/downsampling-manual.asciidoc @@ -389,7 +389,7 @@ This returns: // TESTRESPONSE[s/"ltOJGmqgTVm4T-Buoe7Acg"/$body.data_streams.0.indices.0.index_uuid/] // TESTRESPONSE[s/"2023-07-26T09:26:42.000Z"/$body.data_streams.0.time_series.temporal_ranges.0.start/] // TESTRESPONSE[s/"2023-07-26T13:26:42.000Z"/$body.data_streams.0.time_series.temporal_ranges.0.end/] -// TESTRESPONSE[s/"replicated": false/"replicated": false,"failure_store":{"enabled": false, "indices": [], "rollover_on_write": false}/] +// TESTRESPONSE[s/"replicated": false/"replicated": false,"failure_store":{"enabled": false, "indices": [], "rollover_on_write": true}/] <1> The backing index for this data stream. Before a backing index can be downsampled, the TSDS needs to be rolled over and diff --git a/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc b/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc index b89f55dd41575..5b2e2a1ec70a2 100644 --- a/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc +++ b/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc @@ -147,7 +147,7 @@ and that the next generation index will also be managed by {ilm-init}: // TESTRESPONSE[s/"index_uuid": "xCEhwsp8Tey0-FLNFYVwSg"/"index_uuid": $body.data_streams.0.indices.0.index_uuid/] // TESTRESPONSE[s/"index_name": ".ds-dsl-data-stream-2023.10.19-000002"/"index_name": $body.data_streams.0.indices.1.index_name/] // TESTRESPONSE[s/"index_uuid": "PA_JquKGSiKcAKBA8DJ5gw"/"index_uuid": $body.data_streams.0.indices.1.index_uuid/] -// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW","failure_store":{"enabled": false, "indices": [], "rollover_on_write": false}/] +// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW","failure_store":{"enabled": false, "indices": [], "rollover_on_write": true}/] <1> The name of the backing index. 
<2> For each backing index we display the value of the <> @@ -284,7 +284,7 @@ GET _data_stream/dsl-data-stream // TESTRESPONSE[s/"index_uuid": "xCEhwsp8Tey0-FLNFYVwSg"/"index_uuid": $body.data_streams.0.indices.0.index_uuid/] // TESTRESPONSE[s/"index_name": ".ds-dsl-data-stream-2023.10.19-000002"/"index_name": $body.data_streams.0.indices.1.index_name/] // TESTRESPONSE[s/"index_uuid": "PA_JquKGSiKcAKBA8DJ5gw"/"index_uuid": $body.data_streams.0.indices.1.index_uuid/] -// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW","failure_store":{"enabled": false, "indices": [], "rollover_on_write": false}/] +// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW","failure_store":{"enabled": false, "indices": [], "rollover_on_write": true}/] <1> The existing backing index will continue to be managed by {ilm-init} <2> The existing backing index will continue to be managed by {ilm-init} @@ -364,7 +364,7 @@ GET _data_stream/dsl-data-stream // TESTRESPONSE[s/"index_uuid": "PA_JquKGSiKcAKBA8DJ5gw"/"index_uuid": $body.data_streams.0.indices.1.index_uuid/] // TESTRESPONSE[s/"index_name": ".ds-dsl-data-stream-2023.10.19-000003"/"index_name": $body.data_streams.0.indices.2.index_name/] // TESTRESPONSE[s/"index_uuid": "PA_JquKGSiKcAKBA8abcd1"/"index_uuid": $body.data_streams.0.indices.2.index_uuid/] -// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW","failure_store":{"enabled": false, "indices": [], "rollover_on_write": false}/] +// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW","failure_store":{"enabled": false, "indices": [], "rollover_on_write": true}/] <1> The backing indices that existed before rollover will continue to be managed by {ilm-init} <2> The backing indices that existed before rollover will continue to be managed by {ilm-init} @@ -462,7 +462,7 @@ GET _data_stream/dsl-data-stream // TESTRESPONSE[s/"index_uuid": "PA_JquKGSiKcAKBA8DJ5gw"/"index_uuid": $body.data_streams.0.indices.1.index_uuid/] // TESTRESPONSE[s/"index_name": ".ds-dsl-data-stream-2023.10.19-000003"/"index_name": $body.data_streams.0.indices.2.index_name/] // TESTRESPONSE[s/"index_uuid": "PA_JquKGSiKcAKBA8abcd1"/"index_uuid": $body.data_streams.0.indices.2.index_uuid/] -// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW","failure_store":{"enabled": false, "indices": [], "rollover_on_write": false}/] +// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW","failure_store":{"enabled": false, "indices": [], "rollover_on_write": true}/] <1> The write index is now managed by {ilm-init} <2> The `lifecycle` configured on the data stream is now disabled. 
<3> The next write index will be managed by {ilm-init} diff --git a/docs/reference/indices/get-data-stream.asciidoc b/docs/reference/indices/get-data-stream.asciidoc index 0a318cd135914..b88a1a1be2a7e 100644 --- a/docs/reference/indices/get-data-stream.asciidoc +++ b/docs/reference/indices/get-data-stream.asciidoc @@ -358,4 +358,4 @@ The API returns the following response: // TESTRESPONSE[s/"index_name": ".ds-my-data-stream-two-2099.03.08-000001"/"index_name": $body.data_streams.1.indices.0.index_name/] // TESTRESPONSE[s/"index_uuid": "3liBu2SYS5axasRt6fUIpA"/"index_uuid": $body.data_streams.1.indices.0.index_uuid/] // TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW"/] -// TESTRESPONSE[s/"replicated": false/"replicated": false,"failure_store":{"enabled": false, "indices": [], "rollover_on_write": false}/] +// TESTRESPONSE[s/"replicated": false/"replicated": false,"failure_store":{"enabled": false, "indices": [], "rollover_on_write": true}/] diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java index 1bd4d54b9c804..369f3a9d42724 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java @@ -22,6 +22,7 @@ import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse; import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.admin.indices.rollover.RolloverAction; import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; import org.elasticsearch.action.admin.indices.rollover.RolloverResponse; import org.elasticsearch.action.admin.indices.template.delete.TransportDeleteComposableIndexTemplateAction; @@ -127,6 +128,16 @@ public void setup() throws Exception { response = client.execute(CreateDataStreamAction.INSTANCE, request).get(); assertTrue(response.isAcknowledged()); + // Initialize the failure store. 
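+ // With lazy creation (#109289) the failure store has no backing index until a failure is redirected to it or it is explicitly rolled over, so the setup rolls it over here before resolving index names.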
+ RolloverRequest rolloverRequest = new RolloverRequest("with-fs", null); + rolloverRequest.setIndicesOptions( + IndicesOptions.builder(rolloverRequest.indicesOptions()) + .failureStoreOptions(b -> b.includeRegularIndices(false).includeFailureIndices(true)) + .build() + ); + response = client.execute(RolloverAction.INSTANCE, rolloverRequest).get(); + assertTrue(response.isAcknowledged()); + // Resolve backing index names after data streams have been created: // (these names have a date component, and running around midnight could lead to test failures otherwise) GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { "*" }); diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/FailureStoreQueryParamIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/FailureStoreQueryParamIT.java index 27cd5697fd0f7..4af3a3844e453 100644 --- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/FailureStoreQueryParamIT.java +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/FailureStoreQueryParamIT.java @@ -51,6 +51,8 @@ public void setup() throws IOException { assertOK(client().performRequest(putComposableIndexTemplateRequest)); assertOK(client().performRequest(new Request("PUT", "/_data_stream/" + DATA_STREAM_NAME))); + // Initialize the failure store. + assertOK(client().performRequest(new Request("POST", DATA_STREAM_NAME + "/_rollover?target_failure_store"))); ensureGreen(DATA_STREAM_NAME); final Response dataStreamResponse = client().performRequest(new Request("GET", "/_data_stream/" + DATA_STREAM_NAME)); diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamGetWriteIndexTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamGetWriteIndexTests.java index 8ca5fe1fcdbcf..6a1f8031ce7c6 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamGetWriteIndexTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamGetWriteIndexTests.java @@ -315,7 +315,7 @@ private ClusterState createDataStream(ClusterState state, String name, Instant t TimeValue.ZERO, false ); - return createDataStreamService.createDataStream(request, state, ActionListener.noop()); + return createDataStreamService.createDataStream(request, state, ActionListener.noop(), false); } private MetadataRolloverService.RolloverResult rolloverOver(ClusterState state, String name, Instant time) throws Exception { diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/10_basic.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/10_basic.yml index 5b88f414634b5..609b0c3d0c33c 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/10_basic.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/10_basic.yml @@ -261,6 +261,12 @@ setup: index: default_pipeline: "data_stream_pipeline" final_pipeline: "data_stream_final_pipeline" + mappings: + properties: + '@timestamp': + type: date + count: + type: long - do: indices.create_data_stream: @@ -272,6 +278,23 @@ setup: name: failure-data-stream2 - is_true: acknowledged + # Initialize failure store + - do: + index: + index: failure-data-stream1 + refresh: true + body: + '@timestamp': '2020-12-12' + count: 'invalid value' + # Initialize failure store + - do: + index: + index: 
failure-data-stream2 + refresh: true + body: + '@timestamp': '2020-12-12' + count: 'invalid value' + - do: cluster.health: wait_for_status: green @@ -281,7 +304,7 @@ setup: name: "*" - match: { data_streams.0.name: failure-data-stream1 } - match: { data_streams.0.timestamp_field.name: '@timestamp' } - - match: { data_streams.0.generation: 1 } + - match: { data_streams.0.generation: 2 } - length: { data_streams.0.indices: 1 } - match: { data_streams.0.indices.0.index_name: '/\.ds-failure-data-stream1-(\d{4}\.\d{2}\.\d{2}-)?000001/' } - match: { data_streams.0.status: 'GREEN' } @@ -289,18 +312,18 @@ setup: - match: { data_streams.0.hidden: false } - match: { data_streams.0.failure_store.enabled: true } - length: { data_streams.0.failure_store.indices: 1 } - - match: { data_streams.0.failure_store.indices.0.index_name: '/\.fs-failure-data-stream1-(\d{4}\.\d{2}\.\d{2}-)?000001/'} + - match: { data_streams.0.failure_store.indices.0.index_name: '/\.fs-failure-data-stream1-(\d{4}\.\d{2}\.\d{2}-)?000002/'} - match: { data_streams.1.name: failure-data-stream2 } - match: { data_streams.1.timestamp_field.name: '@timestamp' } - - match: { data_streams.1.generation: 1 } + - match: { data_streams.1.generation: 2 } - length: { data_streams.1.indices: 1 } - match: { data_streams.1.indices.0.index_name: '/\.ds-failure-data-stream2-(\d{4}\.\d{2}\.\d{2}-)?000001/' } - match: { data_streams.1.template: 'my-template4' } - match: { data_streams.1.hidden: false } - match: { data_streams.1.failure_store.enabled: true } - length: { data_streams.1.failure_store.indices: 1 } - - match: { data_streams.1.failure_store.indices.0.index_name: '/\.fs-failure-data-stream2-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + - match: { data_streams.1.failure_store.indices.0.index_name: '/\.fs-failure-data-stream2-(\d{4}\.\d{2}\.\d{2}-)?000002/' } # save the backing index names for later use - set: { data_streams.0.indices.0.index_name: idx0name } @@ -603,7 +626,7 @@ setup: index: $idx0name --- -"Delete data stream with failure stores": +"Delete data stream with failure store": - requires: cluster_features: ["gte_v8.15.0"] reason: "data stream failure stores REST structure changed in 8.15+" @@ -617,12 +640,28 @@ setup: index_patterns: [ failure-data-stream1 ] data_stream: failure_store: true + template: + mappings: + properties: + '@timestamp': + type: date + count: + type: long - do: indices.create_data_stream: name: failure-data-stream1 - is_true: acknowledged + # Initialize failure store + - do: + index: + index: failure-data-stream1 + refresh: true + body: + '@timestamp': '2020-12-12' + count: 'invalid value' + - do: indices.create: index: test_index @@ -650,11 +689,11 @@ setup: indices.get_data_stream: {} - match: { data_streams.0.name: failure-data-stream1 } - match: { data_streams.0.timestamp_field.name: '@timestamp' } - - match: { data_streams.0.generation: 1 } + - match: { data_streams.0.generation: 2 } - length: { data_streams.0.indices: 1 } - match: { data_streams.0.indices.0.index_name: '/\.ds-failure-data-stream1-(\d{4}\.\d{2}\.\d{2}-)?000001/' } - length: { data_streams.0.failure_store.indices: 1 } - - match: { data_streams.0.failure_store.indices.0.index_name: '/\.fs-failure-data-stream1-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + - match: { data_streams.0.failure_store.indices.0.index_name: '/\.fs-failure-data-stream1-(\d{4}\.\d{2}\.\d{2}-)?000002/' } - do: indices.delete_data_stream: @@ -676,6 +715,74 @@ setup: name: my-template4 - is_true: acknowledged +--- +"Delete data stream with failure store uninitialized": + - requires: + 
cluster_features: ["gte_v8.15.0"] + reason: "data stream failure stores REST structure changed in 8.15+" + + - do: + allowed_warnings: + - "index template [my-template4] has index patterns [failure-data-stream1] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-template4] will take precedence during new index creation" + indices.put_index_template: + name: my-template4 + body: + index_patterns: [ failure-data-stream1 ] + data_stream: + failure_store: true + + - do: + indices.create_data_stream: + name: failure-data-stream1 + - is_true: acknowledged + + - do: + indices.create: + index: test_index + body: + settings: + number_of_shards: 1 + number_of_replicas: 1 + + # save the backing index names for later use + - do: + indices.get_data_stream: + name: failure-data-stream1 + + - set: { data_streams.0.indices.0.index_name: idx0name } + - length: { data_streams.0.failure_store.indices: 0 } + + - do: + indices.get: + index: ['.ds-failure-data-stream1-*000001', 'test_index'] + + - is_true: test_index.settings + - is_true: .$idx0name.settings + + - do: + indices.get_data_stream: {} + - match: { data_streams.0.name: failure-data-stream1 } + - match: { data_streams.0.timestamp_field.name: '@timestamp' } + - match: { data_streams.0.generation: 1 } + - length: { data_streams.0.indices: 1 } + - match: { data_streams.0.indices.0.index_name: '/\.ds-failure-data-stream1-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + - length: { data_streams.0.failure_store.indices: 0 } + + - do: + indices.delete_data_stream: + name: failure-data-stream1 + - is_true: acknowledged + + - do: + catch: missing + indices.get: + index: $idx0name + + - do: + indices.delete_index_template: + name: my-template4 + - is_true: acknowledged + --- "Delete data stream missing behaviour": - requires: diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/170_modify_data_stream.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/170_modify_data_stream.yml index a3baa524259b8..3c6d29d939226 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/170_modify_data_stream.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/170_modify_data_stream.yml @@ -105,6 +105,13 @@ index_patterns: [data-*] data_stream: failure_store: true + template: + mappings: + properties: + '@timestamp': + type: date + count: + type: long - do: indices.create_data_stream: @@ -116,6 +123,23 @@ name: data-stream-for-modification2 - is_true: acknowledged + # Initialize failure store + - do: + index: + index: data-stream-for-modification + refresh: true + body: + '@timestamp': '2020-12-12' + count: 'invalid value' + # Initialize failure store + - do: + index: + index: data-stream-for-modification2 + refresh: true + body: + '@timestamp': '2020-12-12' + count: 'invalid value' + # rollover data stream to create new failure store index - do: indices.rollover: @@ -168,7 +192,7 @@ name: "data-stream-for-modification" - match: { data_streams.0.name: data-stream-for-modification } - match: { data_streams.0.timestamp_field.name: '@timestamp' } - - match: { data_streams.0.generation: 3 } + - match: { data_streams.0.generation: 4 } - length: { data_streams.0.indices: 1 } - length: { data_streams.0.failure_store.indices: 3 } - match: { data_streams.0.indices.0.index_name: $write_index } @@ -187,17 +211,6 @@ index: test_index2 failure_store: true - # We are not allowed to remove the write index for 
the failure store - - do: - catch: /cannot remove backing index \[.*\] of data stream \[data-stream-for-modification\] because it is the write index/ - indices.modify_data_stream: - body: - actions: - - remove_backing_index: - data_stream: "data-stream-for-modification" - index: $write_failure_index - failure_store: true - # We will not accept an index that is already part of the data stream's backing indices - do: catch: /cannot add index \[.*\] to data stream \[data-stream-for-modification\] because it is already a backing index on data stream \[data-stream-for-modification\]/ @@ -267,13 +280,112 @@ name: "data-stream-for-modification" - match: { data_streams.0.name: data-stream-for-modification } - match: { data_streams.0.timestamp_field.name: '@timestamp' } - - match: { data_streams.0.generation: 4 } + - match: { data_streams.0.generation: 5 } - length: { data_streams.0.indices: 1 } - length: { data_streams.0.failure_store.indices: 2 } - match: { data_streams.0.indices.0.index_name: $write_index } - match: { data_streams.0.failure_store.indices.0.index_name: $first_failure_index } - match: { data_streams.0.failure_store.indices.1.index_name: $write_failure_index } + # Remove write index of the failure store + - do: + indices.modify_data_stream: + body: + actions: + - remove_backing_index: + data_stream: "data-stream-for-modification" + index: $write_failure_index + failure_store: true + - is_true: acknowledged + + - do: + indices.get_data_stream: + name: "data-stream-for-modification" + - match: { data_streams.0.name: data-stream-for-modification } + - match: { data_streams.0.timestamp_field.name: '@timestamp' } + - match: { data_streams.0.generation: 6 } + - length: { data_streams.0.indices: 1 } + - length: { data_streams.0.failure_store.indices: 1 } + - match: { data_streams.0.indices.0.index_name: $write_index } + - match: { data_streams.0.failure_store.indices.0.index_name: $first_failure_index } + + # Remove the last write index of the failure store + - do: + indices.modify_data_stream: + body: + actions: + - remove_backing_index: + data_stream: "data-stream-for-modification" + index: $first_failure_index + failure_store: true + - is_true: acknowledged + + - do: + indices.get_data_stream: + name: "data-stream-for-modification" + - match: { data_streams.0.name: data-stream-for-modification } + - match: { data_streams.0.timestamp_field.name: '@timestamp' } + - match: { data_streams.0.generation: 7 } + - length: { data_streams.0.indices: 1 } + - length: { data_streams.0.failure_store.indices: 0 } + - match: { data_streams.0.indices.0.index_name: $write_index } + + # Doing these checks again to make sure we still return the same error with an empty failure store + # We will not accept an index that is already part of the data stream's backing indices + - do: + catch: /cannot add index \[.*\] to data stream \[data-stream-for-modification\] because it is already a backing index on data stream \[data-stream-for-modification\]/ + indices.modify_data_stream: + body: + actions: + - add_backing_index: + data_stream: "data-stream-for-modification" + index: $write_index + failure_store: true + + # We will not accept an index that is already part of a different data stream's backing indices + - do: + catch: /cannot add index \[.*\] to data stream \[data-stream-for-modification\] because it is already a backing index on data stream \[data-stream-for-modification2\]/ + indices.modify_data_stream: + body: + actions: + - add_backing_index: + data_stream: "data-stream-for-modification" + index: 
$second_write_index + failure_store: true + + # We will not accept an index that is already part of a different data stream's failure store + - do: + catch: /cannot add index \[.*\] to data stream \[data-stream-for-modification\] because it is already a failure store index on data stream \[data-stream-for-modification2\]/ + indices.modify_data_stream: + body: + actions: + - add_backing_index: + data_stream: "data-stream-for-modification" + index: $second_write_failure_index + failure_store: true + + # We will return a failed response if we try to remove an index from the failure store that is not present + - do: + catch: /index \[.*\] not found/ + indices.modify_data_stream: + body: + actions: + - remove_backing_index: + data_stream: "data-stream-for-modification" + index: $write_index + failure_store: true + + # Add index to empty failure store + - do: + indices.modify_data_stream: + body: + actions: + - add_backing_index: + data_stream: "data-stream-for-modification" + index: "test_index1" + failure_store: true + - is_true: acknowledged + - do: indices.delete_data_stream: name: data-stream-for-modification diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/190_failure_store_redirection.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/190_failure_store_redirection.yml index 5682e2235abc8..04c70ee380d4f 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/190_failure_store_redirection.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/190_failure_store_redirection.yml @@ -181,7 +181,7 @@ teardown: - match: { data_streams.0.indices.0.index_name: '/\.ds-logs-foobar-(\d{4}\.\d{2}\.\d{2}-)?000001/' } - match: { data_streams.0.failure_store.enabled: true } - length: { data_streams.0.failure_store.indices: 1 } - - match: { data_streams.0.failure_store.indices.0.index_name: '/\.fs-logs-foobar-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + - match: { data_streams.0.failure_store.indices.0.index_name: '/\.fs-logs-foobar-(\d{4}\.\d{2}\.\d{2}-)?000002/' } - do: search: @@ -193,7 +193,7 @@ teardown: search: index: .fs-logs-foobar-* - length: { hits.hits: 1 } - - match: { hits.hits.0._index: "/\\.fs-logs-foobar-(\\d{4}\\.\\d{2}\\.\\d{2}-)?000001/" } + - match: { hits.hits.0._index: "/\\.fs-logs-foobar-(\\d{4}\\.\\d{2}\\.\\d{2}-)?000002/" } - exists: hits.hits.0._source.@timestamp - not_exists: hits.hits.0._source.count - match: { hits.hits.0._source.document.index: 'logs-foobar' } diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/200_rollover_failure_store.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/200_rollover_failure_store.yml index 8cdfe3d97bbb8..dcbb0d2e465db 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/200_rollover_failure_store.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/200_rollover_failure_store.yml @@ -39,14 +39,23 @@ teardown: ignore: 404 --- "Roll over a data stream's failure store without conditions": + # Initialize failure store + - do: + index: + index: data-stream-for-rollover + refresh: true + body: + '@timestamp': '2020-12-12' + count: 'invalid value' + - do: indices.rollover: alias: "data-stream-for-rollover" target_failure_store: true - match: { acknowledged: true } - - match: { old_index: "/\\.fs-data-stream-for-rollover-(\\d{4}\\.\\d{2}\\.\\d{2}-)?000001/" } - - match: 
{ new_index: "/\\.fs-data-stream-for-rollover-(\\d{4}\\.\\d{2}\\.\\d{2}-)?000002/" } + - match: { old_index: "/\\.fs-data-stream-for-rollover-(\\d{4}\\.\\d{2}\\.\\d{2}-)?000002/" } + - match: { new_index: "/\\.fs-data-stream-for-rollover-(\\d{4}\\.\\d{2}\\.\\d{2}-)?000003/" } - match: { rolled_over: true } - match: { dry_run: false } @@ -56,12 +65,12 @@ teardown: - match: { data_streams.0.name: data-stream-for-rollover } - match: { data_streams.0.timestamp_field.name: '@timestamp' } # Both backing and failure indices use the same generation field. - - match: { data_streams.0.generation: 2 } + - match: { data_streams.0.generation: 3 } - length: { data_streams.0.indices: 1 } - match: { data_streams.0.indices.0.index_name: '/\.ds-data-stream-for-rollover-(\d{4}\.\d{2}\.\d{2}-)?000001/' } - length: { data_streams.0.failure_store.indices: 2 } - - match: { data_streams.0.failure_store.indices.0.index_name: '/\.fs-data-stream-for-rollover-(\d{4}\.\d{2}\.\d{2}-)?000001/' } - - match: { data_streams.0.failure_store.indices.1.index_name: '/\.fs-data-stream-for-rollover-(\d{4}\.\d{2}\.\d{2}-)?000002/' } + - match: { data_streams.0.failure_store.indices.0.index_name: '/\.fs-data-stream-for-rollover-(\d{4}\.\d{2}\.\d{2}-)?000002/' } + - match: { data_streams.0.failure_store.indices.1.index_name: '/\.fs-data-stream-for-rollover-(\d{4}\.\d{2}\.\d{2}-)?000003/' } --- "Roll over a data stream's failure store with conditions": @@ -82,8 +91,8 @@ teardown: max_docs: 1 - match: { acknowledged: true } - - match: { old_index: "/\\.fs-data-stream-for-rollover-(\\d{4}\\.\\d{2}\\.\\d{2}-)?000001/" } - - match: { new_index: "/\\.fs-data-stream-for-rollover-(\\d{4}\\.\\d{2}\\.\\d{2}-)?000002/" } + - match: { old_index: "/\\.fs-data-stream-for-rollover-(\\d{4}\\.\\d{2}\\.\\d{2}-)?000002/" } + - match: { new_index: "/\\.fs-data-stream-for-rollover-(\\d{4}\\.\\d{2}\\.\\d{2}-)?000003/" } - match: { rolled_over: true } - match: { dry_run: false } @@ -93,22 +102,31 @@ teardown: - match: { data_streams.0.name: data-stream-for-rollover } - match: { data_streams.0.timestamp_field.name: '@timestamp' } # Both backing and failure indices use the same generation field. 
- - match: { data_streams.0.generation: 2 } + - match: { data_streams.0.generation: 3 } - length: { data_streams.0.indices: 1 } - match: { data_streams.0.indices.0.index_name: '/\.ds-data-stream-for-rollover-(\d{4}\.\d{2}\.\d{2}-)?000001/' } - length: { data_streams.0.failure_store.indices: 2 } - - match: { data_streams.0.failure_store.indices.0.index_name: '/\.fs-data-stream-for-rollover-(\d{4}\.\d{2}\.\d{2}-)?000001/' } - - match: { data_streams.0.failure_store.indices.1.index_name: '/\.fs-data-stream-for-rollover-(\d{4}\.\d{2}\.\d{2}-)?000002/' } + - match: { data_streams.0.failure_store.indices.0.index_name: '/\.fs-data-stream-for-rollover-(\d{4}\.\d{2}\.\d{2}-)?000002/' } + - match: { data_streams.0.failure_store.indices.1.index_name: '/\.fs-data-stream-for-rollover-(\d{4}\.\d{2}\.\d{2}-)?000003/' } --- "Don't roll over a data stream's failure store when conditions aren't met": + # Initialize failure store + - do: + index: + index: data-stream-for-rollover + refresh: true + body: + '@timestamp': '2020-12-12' + count: 'invalid value' + - do: indices.rollover: alias: "data-stream-for-rollover" target_failure_store: true body: conditions: - max_docs: 1 + max_primary_shard_docs: 2 - match: { acknowledged: false } - match: { rolled_over: false } @@ -119,11 +137,11 @@ teardown: name: "*" - match: { data_streams.0.name: data-stream-for-rollover } - match: { data_streams.0.timestamp_field.name: '@timestamp' } - - match: { data_streams.0.generation: 1 } + - match: { data_streams.0.generation: 2 } - length: { data_streams.0.indices: 1 } - match: { data_streams.0.indices.0.index_name: '/\.ds-data-stream-for-rollover-(\d{4}\.\d{2}\.\d{2}-)?000001/' } - length: { data_streams.0.failure_store.indices: 1 } - - match: { data_streams.0.failure_store.indices.0.index_name: '/\.fs-data-stream-for-rollover-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + - match: { data_streams.0.failure_store.indices.0.index_name: '/\.fs-data-stream-for-rollover-(\d{4}\.\d{2}\.\d{2}-)?000002/' } --- "Lazily roll over a data stream's failure store after a shard failure": @@ -135,6 +153,15 @@ teardown: path: /{index}/_rollover capabilities: [lazy-rollover-failure-store] + # Initialize failure store + - do: + index: + index: data-stream-for-rollover + refresh: true + body: + '@timestamp': '2020-12-12' + count: 'invalid value' + # Mark the failure store for lazy rollover - do: indices.rollover: @@ -151,11 +178,11 @@ teardown: name: "*" - match: { data_streams.0.name: data-stream-for-rollover } - match: { data_streams.0.timestamp_field.name: '@timestamp' } - - match: { data_streams.0.generation: 1 } + - match: { data_streams.0.generation: 2 } - length: { data_streams.0.indices: 1 } - match: { data_streams.0.indices.0.index_name: '/\.ds-data-stream-for-rollover-(\d{4}\.\d{2}\.\d{2}-)?000001/' } - length: { data_streams.0.failure_store.indices: 1 } - - match: { data_streams.0.failure_store.indices.0.index_name: '/\.fs-data-stream-for-rollover-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + - match: { data_streams.0.failure_store.indices.0.index_name: '/\.fs-data-stream-for-rollover-(\d{4}\.\d{2}\.\d{2}-)?000002/' } - do: index: @@ -171,24 +198,20 @@ teardown: - match: { data_streams.0.name: data-stream-for-rollover } - match: { data_streams.0.timestamp_field.name: '@timestamp' } # Both backing and failure indices use the same generation field. 
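+ # The failing document above triggered the pending lazy rollover, creating the third-generation failure index.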
- - match: { data_streams.0.generation: 2 } + - match: { data_streams.0.generation: 3 } - length: { data_streams.0.indices: 1 } - match: { data_streams.0.indices.0.index_name: '/\.ds-data-stream-for-rollover-(\d{4}\.\d{2}\.\d{2}-)?000001/' } - length: { data_streams.0.failure_store.indices: 2 } - - match: { data_streams.0.failure_store.indices.0.index_name: '/\.fs-data-stream-for-rollover-(\d{4}\.\d{2}\.\d{2}-)?000001/' } - - match: { data_streams.0.failure_store.indices.1.index_name: '/\.fs-data-stream-for-rollover-(\d{4}\.\d{2}\.\d{2}-)?000002/' } + - match: { data_streams.0.failure_store.indices.0.index_name: '/\.fs-data-stream-for-rollover-(\d{4}\.\d{2}\.\d{2}-)?000002/' } + - match: { data_streams.0.failure_store.indices.1.index_name: '/\.fs-data-stream-for-rollover-(\d{4}\.\d{2}\.\d{2}-)?000003/' } + # Ensure failure got redirected to new index (after rollover). - do: search: index: .fs-data-stream-for-rollover-* - - length: { hits.hits: 1 } + - length: { hits.hits: 2 } - match: { hits.hits.0._index: "/\\.fs-data-stream-for-rollover-(\\d{4}\\.\\d{2}\\.\\d{2}-)?000002/" } - - exists: hits.hits.0._source.@timestamp - - not_exists: hits.hits.0._source.count - - match: { hits.hits.0._source.document.index: 'data-stream-for-rollover' } - - match: { hits.hits.0._source.document.source.@timestamp: '2020-12-12' } - - match: { hits.hits.0._source.document.source.count: 'invalid value' } - - match: { hits.hits.0._source.error.type: 'document_parsing_exception' } + - match: { hits.hits.1._index: "/\\.fs-data-stream-for-rollover-(\\d{4}\\.\\d{2}\\.\\d{2}-)?000003/" } --- "Lazily roll over a data stream's failure store after an ingest failure": @@ -234,6 +257,15 @@ teardown: indices.create_data_stream: name: data-stream-for-lazy-rollover + # Initialize failure store + - do: + index: + index: data-stream-for-lazy-rollover + refresh: true + body: + '@timestamp': '2020-12-12' + count: 'invalid value' + # Mark the failure store for lazy rollover - do: indices.rollover: @@ -250,11 +282,11 @@ teardown: name: "*" - match: { data_streams.0.name: data-stream-for-lazy-rollover } - match: { data_streams.0.timestamp_field.name: '@timestamp' } - - match: { data_streams.0.generation: 1 } + - match: { data_streams.0.generation: 2 } - length: { data_streams.0.indices: 1 } - match: { data_streams.0.indices.0.index_name: '/\.ds-data-stream-for-lazy-rollover-(\d{4}\.\d{2}\.\d{2}-)?000001/' } - length: { data_streams.0.failure_store.indices: 1 } - - match: { data_streams.0.failure_store.indices.0.index_name: '/\.fs-data-stream-for-lazy-rollover-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + - match: { data_streams.0.failure_store.indices.0.index_name: '/\.fs-data-stream-for-lazy-rollover-(\d{4}\.\d{2}\.\d{2}-)?000002/' } - do: index: @@ -270,13 +302,20 @@ teardown: - match: { data_streams.0.name: data-stream-for-lazy-rollover } - match: { data_streams.0.timestamp_field.name: '@timestamp' } # Both backing and failure indices use the same generation field. 
- - match: { data_streams.0.generation: 2 } + - match: { data_streams.0.generation: 3 } - length: { data_streams.0.indices: 1 } - match: { data_streams.0.indices.0.index_name: '/\.ds-data-stream-for-lazy-rollover-(\d{4}\.\d{2}\.\d{2}-)?000001/' } - length: { data_streams.0.failure_store.indices: 2 } - - match: { data_streams.0.failure_store.indices.0.index_name: '/\.fs-data-stream-for-lazy-rollover-(\d{4}\.\d{2}\.\d{2}-)?000001/' } - - match: { data_streams.0.failure_store.indices.1.index_name: '/\.fs-data-stream-for-lazy-rollover-(\d{4}\.\d{2}\.\d{2}-)?000002/' } + - match: { data_streams.0.failure_store.indices.0.index_name: '/\.fs-data-stream-for-lazy-rollover-(\d{4}\.\d{2}\.\d{2}-)?000002/' } + - match: { data_streams.0.failure_store.indices.1.index_name: '/\.fs-data-stream-for-lazy-rollover-(\d{4}\.\d{2}\.\d{2}-)?000003/' } + # Ensure failure got redirected to new index (after rollover). + - do: + search: + index: .fs-data-stream-for-lazy-rollover-* + - length: { hits.hits: 2 } + - match: { hits.hits.0._index: "/\\.fs-data-stream-for-lazy-rollover-(\\d{4}\\.\\d{2}\\.\\d{2}-)?000002/" } + - match: { hits.hits.1._index: "/\\.fs-data-stream-for-lazy-rollover-(\\d{4}\\.\\d{2}\\.\\d{2}-)?000003/" } --- "A failure store marked for lazy rollover should only be rolled over when there is a failure": - requires: @@ -287,6 +326,15 @@ teardown: path: /{index}/_rollover capabilities: [lazy-rollover-failure-store] + # Initialize failure store + - do: + index: + index: data-stream-for-rollover + refresh: true + body: + '@timestamp': '2020-12-12' + count: 'invalid value' + # Mark the failure store for lazy rollover - do: indices.rollover: @@ -303,11 +351,11 @@ teardown: name: "*" - match: { data_streams.0.name: data-stream-for-rollover } - match: { data_streams.0.timestamp_field.name: '@timestamp' } - - match: { data_streams.0.generation: 1 } + - match: { data_streams.0.generation: 2 } - length: { data_streams.0.indices: 1 } - match: { data_streams.0.indices.0.index_name: '/\.ds-data-stream-for-rollover-(\d{4}\.\d{2}\.\d{2}-)?000001/' } - length: { data_streams.0.failure_store.indices: 1 } - - match: { data_streams.0.failure_store.indices.0.index_name: '/\.fs-data-stream-for-rollover-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + - match: { data_streams.0.failure_store.indices.0.index_name: '/\.fs-data-stream-for-rollover-(\d{4}\.\d{2}\.\d{2}-)?000002/' } - do: index: @@ -323,8 +371,107 @@ teardown: - match: { data_streams.0.name: data-stream-for-rollover } - match: { data_streams.0.timestamp_field.name: '@timestamp' } # Both backing and failure indices use the same generation field. - - match: { data_streams.0.generation: 1 } + - match: { data_streams.0.generation: 2 } + - length: { data_streams.0.indices: 1 } + - match: { data_streams.0.indices.0.index_name: '/\.ds-data-stream-for-rollover-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + - length: { data_streams.0.failure_store.indices: 1 } + - match: { data_streams.0.failure_store.indices.0.index_name: '/\.fs-data-stream-for-rollover-(\d{4}\.\d{2}\.\d{2}-)?000002/' } + +--- +"Rolling over an uninitialized failure store should initialize it": + # Initializing with conditions is not allowed. 
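+ # An empty failure store has no write index to evaluate conditions against, so such a request is rejected.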
+ - do: + catch: /Rolling over\/initializing an empty failure store is only supported without conditions\./ + indices.rollover: + alias: "data-stream-for-rollover" + target_failure_store: true + body: + conditions: + max_docs: 1 + + - do: + indices.rollover: + alias: "data-stream-for-rollover" + target_failure_store: true + + - match: { acknowledged: true } + - match: { old_index: "_none_" } + - match: { new_index: "/\\.fs-data-stream-for-rollover-(\\d{4}\\.\\d{2}\\.\\d{2}-)?000002/" } + - match: { rolled_over: true } + - match: { dry_run: false } + + - do: + indices.get_data_stream: + name: "*" + - match: { data_streams.0.name: data-stream-for-rollover } + - match: { data_streams.0.timestamp_field.name: '@timestamp' } + # Both backing and failure indices use the same generation field. + - match: { data_streams.0.generation: 2 } + - length: { data_streams.0.indices: 1 } + - match: { data_streams.0.indices.0.index_name: '/\.ds-data-stream-for-rollover-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + - length: { data_streams.0.failure_store.indices: 1 } + - match: { data_streams.0.failure_store.indices.0.index_name: '/\.fs-data-stream-for-rollover-(\d{4}\.\d{2}\.\d{2}-)?000002/' } + +--- +"Rolling over a failure store on a data stream without the failure store enabled should work": + - do: + allowed_warnings: + - "index template [my-other-template] has index patterns [other-data-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-other-template] will take precedence during new index creation" + indices.put_index_template: + name: my-other-template + body: + index_patterns: [other-data-*] + data_stream: {} + + - do: + indices.create_data_stream: + name: other-data-stream-for-rollover + + # Initializing should work + - do: + indices.rollover: + alias: "other-data-stream-for-rollover" + target_failure_store: true + + - match: { acknowledged: true } + - match: { old_index: "_none_" } + - match: { new_index: "/\\.fs-other-data-stream-for-rollover-(\\d{4}\\.\\d{2}\\.\\d{2}-)?000002/" } + - match: { rolled_over: true } + - match: { dry_run: false } + + - do: + indices.get_data_stream: + name: other-data-stream-for-rollover + - match: { data_streams.0.name: other-data-stream-for-rollover } + - match: { data_streams.0.timestamp_field.name: '@timestamp' } + # Both backing and failure indices use the same generation field. 
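+ # Initializing the empty failure store consumed generation 2 even though the template never enabled the failure store.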
+ - match: { data_streams.0.generation: 2 } + - length: { data_streams.0.indices: 1 } + - match: { data_streams.0.indices.0.index_name: '/\.ds-other-data-stream-for-rollover-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + - length: { data_streams.0.failure_store.indices: 1 } + - match: { data_streams.0.failure_store.indices.0.index_name: '/\.fs-other-data-stream-for-rollover-(\d{4}\.\d{2}\.\d{2}-)?000002/' } + + # And "regular" rollover should work + - do: + indices.rollover: + alias: "other-data-stream-for-rollover" + target_failure_store: true + + - match: { acknowledged: true } + - match: { old_index: "/\\.fs-other-data-stream-for-rollover-(\\d{4}\\.\\d{2}\\.\\d{2}-)?000002/" } + - match: { new_index: "/\\.fs-other-data-stream-for-rollover-(\\d{4}\\.\\d{2}\\.\\d{2}-)?000003/" } + - match: { rolled_over: true } + - match: { dry_run: false } + + - do: + indices.get_data_stream: + name: other-data-stream-for-rollover + - match: { data_streams.0.name: other-data-stream-for-rollover } + - match: { data_streams.0.timestamp_field.name: '@timestamp' } + # Both backing and failure indices use the same generation field. + - match: { data_streams.0.generation: 3 } + - length: { data_streams.0.indices: 1 } + - match: { data_streams.0.indices.0.index_name: '/\.ds-other-data-stream-for-rollover-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + - length: { data_streams.0.failure_store.indices: 2 } + - match: { data_streams.0.failure_store.indices.0.index_name: '/\.fs-other-data-stream-for-rollover-(\d{4}\.\d{2}\.\d{2}-)?000002/' } + - match: { data_streams.0.failure_store.indices.1.index_name: '/\.fs-other-data-stream-for-rollover-(\d{4}\.\d{2}\.\d{2}-)?000003/' } diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/30_auto_create_data_stream.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/30_auto_create_data_stream.yml index 3ab22e6271c6d..61d17c3d675cf 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/30_auto_create_data_stream.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/30_auto_create_data_stream.yml @@ -1,5 +1,5 @@ --- -"Put index template": +"Auto-create data stream": - requires: cluster_features: ["gte_v7.9.0"] reason: "data streams only supported in 7.9+" @@ -48,7 +48,7 @@ - is_true: acknowledged --- -"Put index template with failure store": +"Don't initialize failure store during data stream auto-creation on successful index": - requires: cluster_features: ["gte_v8.15.0"] reason: "data stream failure stores REST structure changed in 8.15+" @@ -92,8 +92,7 @@ - length: { data_streams.0.indices: 1 } - match: { data_streams.0.indices.0.index_name: '/\.ds-logs-foobar-(\d{4}\.\d{2}\.\d{2}-)?000001/' } - match: { data_streams.0.failure_store.enabled: true } - - length: { data_streams.0.failure_store.indices: 1 } - - match: { data_streams.0.failure_store.indices.0.index_name: '/\.fs-logs-foobar-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + - length: { data_streams.0.failure_store.indices: 0 } - do: indices.delete_data_stream: diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 754d07a89dbce..ec02b8a45cd42 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -195,6 +195,7 @@ static TransportVersion def(int id) { public static final TransportVersion DELETE_SNAPSHOTS_ASYNC_ADDED = def(8_686_00_0); 
public static final TransportVersion VERSION_SUPPORTING_SPARSE_VECTOR_STATS = def(8_687_00_0); public static final TransportVersion ML_AD_OUTPUT_MEMORY_ALLOCATOR_FIELD = def(8_688_00_0); + public static final TransportVersion FAILURE_STORE_LAZY_CREATION = def(8_689_00_0); /* * STOP! READ THIS FIRST! No, really, diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java index 094fccbc35182..e68263aab5330 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java @@ -261,7 +261,8 @@ ClusterState execute( ClusterState clusterState = metadataCreateDataStreamService.createDataStream( createRequest, currentState, - rerouteCompletionIsNotRequired() + rerouteCompletionIsNotRequired(), + request.isInitializeFailureStore() ); final var dataStream = clusterState.metadata().dataStreams().get(request.index()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java index 3a78738ae986a..5d1b7264ebf81 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java @@ -64,6 +64,8 @@ public class CreateIndexRequest extends AcknowledgedRequest private boolean requireDataStream; + private boolean initializeFailureStore; + private Settings settings = Settings.EMPTY; private String mappings = "{}"; @@ -109,6 +111,11 @@ public CreateIndexRequest(StreamInput in) throws IOException { } else { requireDataStream = false; } + if (in.getTransportVersion().onOrAfter(TransportVersions.FAILURE_STORE_LAZY_CREATION)) { + initializeFailureStore = in.readBoolean(); + } else { + initializeFailureStore = true; + } } public CreateIndexRequest() { @@ -468,6 +475,19 @@ public CreateIndexRequest requireDataStream(boolean requireDataStream) { return this; } + public boolean isInitializeFailureStore() { + return initializeFailureStore; + } + + /** + * Set whether this CreateIndexRequest should initialize the failure store on data stream creation. This can be necessary when, for + * example, a failure occurs while trying to ingest a document into a data stream that has to be auto-created. 
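+     * If the failure store is not initialized up front and the matching template has one enabled, it is instead marked for lazy
+     * creation and initialized on the first write to it.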
+ */ + public CreateIndexRequest initializeFailureStore(boolean initializeFailureStore) { + this.initializeFailureStore = initializeFailureStore; + return this; + } + @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -491,7 +511,10 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(origin); } if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_13_0)) { - out.writeOptionalBoolean(this.requireDataStream); + out.writeBoolean(this.requireDataStream); + } + if (out.getTransportVersion().onOrAfter(TransportVersions.FAILURE_STORE_LAZY_CREATION)) { + out.writeBoolean(this.initializeFailureStore); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java index ed3721b35f3b4..5c7518abdbbf8 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java @@ -84,6 +84,7 @@ public class MetadataRolloverService { AutoShardingType.COOLDOWN_PREVENTED_DECREASE, "es.auto_sharding.cooldown_prevented_decrease.total" ); + private static final String NON_EXISTENT_SOURCE = "_none_"; private final ThreadPool threadPool; private final MetadataCreateIndexService createIndexService; @@ -221,14 +222,13 @@ private static NameResolution resolveAliasRolloverNames(Metadata metadata, Index private static NameResolution resolveDataStreamRolloverNames(Metadata metadata, DataStream dataStream, boolean isFailureStoreRollover) { final DataStream.DataStreamIndices dataStreamIndices = dataStream.getDataStreamIndices(isFailureStoreRollover); - assert dataStreamIndices.getWriteIndex() != null : "Unable to roll over dataStreamIndices with no indices"; + assert dataStreamIndices.getIndices().isEmpty() == false || isFailureStoreRollover + : "Unable to roll over dataStreamIndices with no indices"; - final IndexMetadata originalWriteIndex = metadata.index(dataStreamIndices.getWriteIndex()); - return new NameResolution( - originalWriteIndex.getIndex().getName(), - null, - dataStream.nextWriteIndexAndGeneration(metadata, dataStreamIndices).v1() - ); + final String originalWriteIndex = dataStreamIndices.getIndices().isEmpty() && dataStreamIndices.isRolloverOnWrite() + ? NON_EXISTENT_SOURCE + : metadata.index(dataStreamIndices.getWriteIndex()).getIndex().getName(); + return new NameResolution(originalWriteIndex, null, dataStream.nextWriteIndexAndGeneration(metadata, dataStreamIndices).v1()); } private RolloverResult rolloverAlias( @@ -323,13 +323,14 @@ private RolloverResult rolloverDataStream( } final DataStream.DataStreamIndices dataStreamIndices = dataStream.getDataStreamIndices(isFailureStoreRollover); - final Index originalWriteIndex = dataStreamIndices.getWriteIndex(); + final boolean isLazyCreation = dataStreamIndices.getIndices().isEmpty() && dataStreamIndices.isRolloverOnWrite(); + final Index originalWriteIndex = isLazyCreation ? 
null : dataStreamIndices.getWriteIndex(); final Tuple nextIndexAndGeneration = dataStream.nextWriteIndexAndGeneration(metadata, dataStreamIndices); final String newWriteIndexName = nextIndexAndGeneration.v1(); final long newGeneration = nextIndexAndGeneration.v2(); MetadataCreateIndexService.validateIndexName(newWriteIndexName, currentState); // fails if the index already exists if (onlyValidate) { - return new RolloverResult(newWriteIndexName, originalWriteIndex.getName(), currentState); + return new RolloverResult(newWriteIndexName, isLazyCreation ? NON_EXISTENT_SOURCE : originalWriteIndex.getName(), currentState); } ClusterState newState; @@ -423,10 +424,12 @@ yield new DataStreamAutoShardingEvent( RolloverInfo rolloverInfo = new RolloverInfo(dataStreamName, metConditions, threadPool.absoluteTimeInMillis()); - Metadata.Builder metadataBuilder = Metadata.builder(newState.metadata()) - .put( + Metadata.Builder metadataBuilder = Metadata.builder(newState.metadata()); + if (isLazyCreation == false) { + metadataBuilder.put( IndexMetadata.builder(newState.metadata().index(originalWriteIndex)).stats(sourceIndexStats).putRolloverInfo(rolloverInfo) ); + } metadataBuilder = writeLoadForecaster.withWriteLoadForecastForWriteIndex(dataStreamName, metadataBuilder); metadataBuilder = withShardSizeForecastForWriteIndex(dataStreamName, metadataBuilder); @@ -434,7 +437,7 @@ yield new DataStreamAutoShardingEvent( newState = ClusterState.builder(newState).metadata(metadataBuilder).build(); newState = MetadataDataStreamsService.setRolloverOnWrite(newState, dataStreamName, false, isFailureStoreRollover); - return new RolloverResult(newWriteIndexName, originalWriteIndex.getName(), newState); + return new RolloverResult(newWriteIndexName, isLazyCreation ? NON_EXISTENT_SOURCE : originalWriteIndex.getName(), newState); } /** @@ -664,12 +667,6 @@ static void validate( "aliases, mappings, and index settings may not be specified when rolling over a data stream" ); } - var dataStream = (DataStream) indexAbstraction; - if (isFailureStoreRollover && dataStream.isFailureStoreEnabled() == false) { - throw new IllegalArgumentException( - "unable to roll over failure store because [" + indexAbstraction.getName() + "] does not have the failure store enabled" - ); - } } } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java index bf059f6fe868e..34da6795cd5f2 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java @@ -230,6 +230,16 @@ protected void masterOperation( ); return; } + if (targetFailureStore && rolloverTargetAbstraction.isDataStreamRelated() == false) { + listener.onFailure(new IllegalStateException("Rolling over failure stores is only possible on data streams.")); + return; + } + + // When we're initializing a failure store, we skip the stats request because there is no source index to retrieve stats for. 
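+        // Instead we go straight to the dedicated initialization path below, which only supports rollovers without conditions.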
+        if (targetFailureStore && ((DataStream) rolloverTargetAbstraction).getFailureIndices().getIndices().isEmpty()) {
+            initializeFailureStore(rolloverRequest, listener, trialSourceIndexName, trialRolloverIndexName);
+            return;
+        }
 
         final var statsIndicesOptions = new IndicesOptions(
             IndicesOptions.ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS,
@@ -317,7 +327,7 @@ protected void masterOperation(
 
         // Pre-check the conditions to see whether we should submit a new cluster state task
         if (rolloverRequest.areConditionsMet(trialConditionResults)) {
-            String source = "rollover_index source [" + trialRolloverIndexName + "] to target [" + trialRolloverIndexName + "]";
+            String source = "rollover_index source [" + trialSourceIndexName + "] to target [" + trialRolloverIndexName + "]";
             RolloverTask rolloverTask = new RolloverTask(
                 rolloverRequest,
                 statsResponse,
@@ -334,6 +344,40 @@ protected void masterOperation(
         );
     }
 
+    private void initializeFailureStore(
+        RolloverRequest rolloverRequest,
+        ActionListener<RolloverResponse> listener,
+        String trialSourceIndexName,
+        String trialRolloverIndexName
+    ) {
+        if (rolloverRequest.getConditionValues().isEmpty() == false) {
+            listener.onFailure(
+                new IllegalStateException("Rolling over/initializing an empty failure store is only supported without conditions.")
+            );
+            return;
+        }
+        final RolloverResponse trialRolloverResponse = new RolloverResponse(
+            trialSourceIndexName,
+            trialRolloverIndexName,
+            Map.of(),
+            rolloverRequest.isDryRun(),
+            false,
+            false,
+            false,
+            rolloverRequest.isLazy()
+        );
+
+        // If this is a dry run, return with the results without invoking a cluster state update.
+        if (rolloverRequest.isDryRun()) {
+            listener.onResponse(trialRolloverResponse);
+            return;
+        }
+
+        String source = "initialize_failure_store with index [" + trialRolloverIndexName + "]";
+        RolloverTask rolloverTask = new RolloverTask(rolloverRequest, null, trialRolloverResponse, null, listener);
+        submitRolloverTask(rolloverRequest, source, rolloverTask);
+    }
+
     void submitRolloverTask(RolloverRequest rolloverRequest, String source, RolloverTask rolloverTask) {
         rolloverTaskQueue.submitTask(source, rolloverTask, rolloverRequest.masterNodeTimeout());
     }
diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java
index a9431ca1eeff0..4fc17407ae6d0 100644
--- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java
+++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java
@@ -350,7 +350,7 @@ protected void doInternalExecute(Task task, BulkRequest bulkRequest, Executor ex
             return;
         }
 
-        Map<String, Boolean> indicesToAutoCreate = new HashMap<>();
+        Map<String, CreateIndexRequest> indicesToAutoCreate = new HashMap<>();
         Set<String> dataStreamsToBeRolledOver = new HashSet<>();
         Set<String> failureStoresToBeRolledOver = new HashSet<>();
         populateMissingTargets(bulkRequest, indicesToAutoCreate, dataStreamsToBeRolledOver, failureStoresToBeRolledOver);
@@ -373,19 +373,19 @@ protected void doInternalExecute(Task task, BulkRequest bulkRequest, Executor ex
      * for lazy rollover.
      *
      * @param bulkRequest the bulk request
-     * @param indicesToAutoCreate a map of index names to whether they require a data stream
+     * @param indicesToAutoCreate a map of index names to their creation request that need to be auto-created
      * @param dataStreamsToBeRolledOver a set of data stream names that were marked for lazy rollover and thus need to be rolled over now
      * @param failureStoresToBeRolledOver a set of data stream names whose failure store was marked for lazy rollover and thus need to be
      *                                    rolled over now
      */
     private void populateMissingTargets(
         BulkRequest bulkRequest,
-        Map<String, Boolean> indicesToAutoCreate,
+        Map<String, CreateIndexRequest> indicesToAutoCreate,
         Set<String> dataStreamsToBeRolledOver,
         Set<String> failureStoresToBeRolledOver
     ) {
         ClusterState state = clusterService.state();
-        // A map for memorizing which indices we already exist (or don't).
+        // A map for memorizing which indices exist.
         Map<String, Boolean> indexExistence = new HashMap<>();
         Function<String, Boolean> indexExistenceComputation = (index) -> indexNameExpressionResolver.hasIndexAbstraction(index, state);
         boolean lazyRolloverFeature = featureService.clusterHasFeature(state, LazyRolloverAction.DATA_STREAM_LAZY_ROLLOVER);
@@ -399,19 +399,36 @@ private void populateMissingTargets(
                 && request.versionType() != VersionType.EXTERNAL_GTE) {
                 continue;
             }
+            boolean writeToFailureStore = request instanceof IndexRequest indexRequest && indexRequest.isWriteToFailureStore();
             boolean indexExists = indexExistence.computeIfAbsent(request.index(), indexExistenceComputation);
             if (indexExists == false) {
-                // We should only auto create an index if _none_ of the requests are requiring it to be an alias.
+                // We should only auto-create an index if _none_ of the requests are requiring it to be an alias.
                 if (request.isRequireAlias()) {
-                    // Remember that this a request required this index to be an alias.
+                    // Remember that this request required this index to be an alias.
                     if (indicesThatRequireAlias.add(request.index())) {
                         // If we didn't already know that, we remove the index from the list of indices to create (if present).
                         indicesToAutoCreate.remove(request.index());
                     }
                 } else if (indicesThatRequireAlias.contains(request.index()) == false) {
-                    Boolean requiresDataStream = indicesToAutoCreate.get(request.index());
-                    if (requiresDataStream == null || (requiresDataStream == false && request.isRequireDataStream())) {
-                        indicesToAutoCreate.put(request.index(), request.isRequireDataStream());
+                    CreateIndexRequest createIndexRequest = indicesToAutoCreate.get(request.index());
+                    // Create a new CreateIndexRequest if we didn't already have one.
+                    if (createIndexRequest == null) {
+                        createIndexRequest = new CreateIndexRequest(request.index()).cause("auto(bulk api)")
+                            .masterNodeTimeout(bulkRequest.timeout())
+                            .requireDataStream(request.isRequireDataStream())
+                            // If this IndexRequest is directed towards a failure store, but the data stream doesn't exist, we initialize
+                            // the failure store on data stream creation instead of lazily.
+                            .initializeFailureStore(writeToFailureStore);
+                        indicesToAutoCreate.put(request.index(), createIndexRequest);
+                    } else {
+                        // Track whether one of the index requests in this bulk request requires the target to be a data stream.
+                        if (createIndexRequest.isRequireDataStream() == false && request.isRequireDataStream()) {
+                            createIndexRequest.requireDataStream(true);
+                        }
+                        // Track whether one of the index requests in this bulk request is directed towards a failure store.
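+                        // If so, the auto-create request must initialize the failure store eagerly rather than lazily.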
+                        if (createIndexRequest.isInitializeFailureStore() == false && writeToFailureStore) {
+                            createIndexRequest.initializeFailureStore(true);
+                        }
                     }
                 }
             }
@@ -419,7 +436,6 @@ private void populateMissingTargets(
             if (lazyRolloverFeature) {
                 DataStream dataStream = state.metadata().dataStreams().get(request.index());
                 if (dataStream != null) {
-                    var writeToFailureStore = request instanceof IndexRequest indexRequest && indexRequest.isWriteToFailureStore();
                     if (writeToFailureStore == false && dataStream.getBackingIndices().isRolloverOnWrite()) {
                         dataStreamsToBeRolledOver.add(request.index());
                     } else if (lazyRolloverFailureStoreFeature
@@ -441,7 +457,7 @@ protected void createMissingIndicesAndIndexData(
         BulkRequest bulkRequest,
         Executor executor,
         ActionListener<BulkResponse> listener,
-        Map<String, Boolean> indicesToAutoCreate,
+        Map<String, CreateIndexRequest> indicesToAutoCreate,
         Set<String> dataStreamsToBeRolledOver,
         Set<String> failureStoresToBeRolledOver,
         long startTime
@@ -468,14 +484,14 @@ protected void doRun() {
 
     private void createIndices(
         BulkRequest bulkRequest,
-        Map<String, Boolean> indicesToAutoCreate,
+        Map<String, CreateIndexRequest> indicesToAutoCreate,
         Map<String, IndexNotFoundException> indicesThatCannotBeCreated,
         AtomicArray<BulkItemResponse> responses,
         RefCountingRunnable refs
     ) {
-        for (Map.Entry<String, Boolean> indexEntry : indicesToAutoCreate.entrySet()) {
+        for (Map.Entry<String, CreateIndexRequest> indexEntry : indicesToAutoCreate.entrySet()) {
             final String index = indexEntry.getKey();
-            createIndex(index, indexEntry.getValue(), bulkRequest.timeout(), ActionListener.releaseAfter(new ActionListener<>() {
+            createIndex(indexEntry.getValue(), ActionListener.releaseAfter(new ActionListener<>() {
                 @Override
                 public void onResponse(CreateIndexResponse createIndexResponse) {}
@@ -641,12 +657,7 @@ private static boolean isSystemIndex(SortedMap<String, IndexAbstraction> indices
         }
     }
 
-    void createIndex(String index, boolean requireDataStream, TimeValue timeout, ActionListener<CreateIndexResponse> listener) {
-        CreateIndexRequest createIndexRequest = new CreateIndexRequest();
-        createIndexRequest.index(index);
-        createIndexRequest.requireDataStream(requireDataStream);
-        createIndexRequest.cause("auto(bulk api)");
-        createIndexRequest.masterNodeTimeout(timeout);
+    void createIndex(CreateIndexRequest createIndexRequest, ActionListener<CreateIndexResponse> listener) {
         client.execute(AutoCreateAction.INSTANCE, createIndexRequest, listener);
     }
diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java
index 83d331d2e4aa1..f0f950ca324bf 100644
--- a/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java
+++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java
@@ -10,6 +10,7 @@
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.DocWriteRequest;
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
 import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.action.ingest.SimulateIndexResponse;
 import org.elasticsearch.action.support.ActionFilters;
@@ -72,7 +73,7 @@ protected void createMissingIndicesAndIndexData(
         BulkRequest bulkRequest,
         Executor executor,
         ActionListener<BulkResponse> listener,
-        Map<String, Boolean> indicesToAutoCreate,
+        Map<String, CreateIndexRequest> indicesToAutoCreate,
         Set<String> dataStreamsToRollover,
         Set<String> failureStoresToBeRolledOver,
         long startTime
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java
index bf1d9462ab89f..03b23c462ecec 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java
+++ 
b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java @@ -581,23 +581,13 @@ public DataStream removeFailureStoreIndex(Index index) { ); } - // TODO: When failure stores are lazily created, this wont necessarily be required anymore. We can remove the failure store write - // index as long as we mark the data stream to lazily rollover the failure store with no conditions on its next write - if (failureIndices.indices.size() == (failureIndexPosition + 1)) { - throw new IllegalArgumentException( - String.format( - Locale.ROOT, - "cannot remove backing index [%s] of data stream [%s] because it is the write index of the failure store", - index.getName(), - name - ) - ); - } - + // If this is the write index, we're marking the failure store for lazy rollover, to make sure a new write index gets created on the + // next write. We do this regardless of whether it's the last index in the failure store or not. + boolean rolloverOnWrite = failureIndices.indices.size() == (failureIndexPosition + 1); List updatedFailureIndices = new ArrayList<>(failureIndices.indices); updatedFailureIndices.remove(index); assert updatedFailureIndices.size() == failureIndices.indices.size() - 1; - return copy().setFailureIndices(failureIndices.copy().setIndices(updatedFailureIndices).build()) + return copy().setFailureIndices(failureIndices.copy().setIndices(updatedFailureIndices).setRolloverOnWrite(rolloverOnWrite).build()) .setGeneration(generation + 1) .build(); } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java index 8bc8f9d96bf24..459c6c6ec733e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java @@ -422,7 +422,7 @@ private static void resolveIndicesForDataStream(Context context, DataStream data } } } - if (shouldIncludeFailureIndices(context.getOptions(), dataStream)) { + if (shouldIncludeFailureIndices(context.getOptions())) { // We short-circuit here, if failure indices are not allowed and they can be skipped if (context.getOptions().allowFailureIndices() || context.getOptions().ignoreUnavailable() == false) { for (Index index : dataStream.getFailureIndices().getIndices()) { @@ -441,7 +441,7 @@ private static void resolveWriteIndexForDataStreams(Context context, DataStream concreteIndicesResult.add(writeIndex); } } - if (shouldIncludeFailureIndices(context.getOptions(), dataStream)) { + if (shouldIncludeFailureIndices(context.getOptions())) { Index failureStoreWriteIndex = dataStream.getFailureStoreWriteIndex(); if (failureStoreWriteIndex != null && addIndex(failureStoreWriteIndex, null, context)) { if (context.options.allowFailureIndices() == false) { @@ -456,10 +456,9 @@ private static boolean shouldIncludeRegularIndices(IndicesOptions indicesOptions return DataStream.isFailureStoreFeatureFlagEnabled() == false || indicesOptions.includeRegularIndices(); } - private static boolean shouldIncludeFailureIndices(IndicesOptions indicesOptions, DataStream dataStream) { - return DataStream.isFailureStoreFeatureFlagEnabled() - && indicesOptions.includeFailureIndices() - && dataStream.isFailureStoreEnabled(); + private static boolean shouldIncludeFailureIndices(IndicesOptions indicesOptions) { + // We return failure indices regardless of whether the data stream actually has the `failureStoreEnabled` flag set to true. 
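+        // The only remaining gates are the cluster-wide failure store feature flag and the indices options on the request.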
+ return DataStream.isFailureStoreFeatureFlagEnabled() && indicesOptions.includeFailureIndices(); } private static boolean resolvesToMoreThanOneIndex(IndexAbstraction indexAbstraction, Context context) { @@ -469,7 +468,7 @@ private static boolean resolvesToMoreThanOneIndex(IndexAbstraction indexAbstract if (shouldIncludeRegularIndices(context.getOptions())) { count += dataStream.getIndices().size(); } - if (shouldIncludeFailureIndices(context.getOptions(), dataStream)) { + if (shouldIncludeFailureIndices(context.getOptions())) { count += dataStream.getFailureIndices().getIndices().size(); } return count > 1; @@ -1426,8 +1425,7 @@ private static Stream expandToOpenClosed(Context context, Stream rerouteListener + ActionListener rerouteListener, + boolean initializeFailureStore ) throws Exception { - return createDataStream(metadataCreateIndexService, clusterService.getSettings(), current, isDslOnlyMode, request, rerouteListener); + return createDataStream( + metadataCreateIndexService, + clusterService.getSettings(), + current, + isDslOnlyMode, + request, + rerouteListener, + initializeFailureStore + ); } public static final class CreateDataStreamClusterStateUpdateRequest extends ClusterStateUpdateRequest< @@ -194,7 +198,8 @@ static ClusterState createDataStream( ClusterState currentState, boolean isDslOnlyMode, CreateDataStreamClusterStateUpdateRequest request, - ActionListener rerouteListener + ActionListener rerouteListener, + boolean initializeFailureStore ) throws Exception { return createDataStream( metadataCreateIndexService, @@ -204,7 +209,8 @@ static ClusterState createDataStream( request, List.of(), null, - rerouteListener + rerouteListener, + initializeFailureStore ); } @@ -212,11 +218,12 @@ static ClusterState createDataStream( * Creates a data stream with the specified request, backing indices and write index. * * @param metadataCreateIndexService Used if a new write index must be created - * @param currentState Cluster state - * @param request The create data stream request - * @param backingIndices List of backing indices. May be empty - * @param writeIndex Write index for the data stream. If null, a new write index will be created. - * @return Cluster state containing the new data stream + * @param currentState Cluster state + * @param request The create data stream request + * @param backingIndices List of backing indices. May be empty + * @param writeIndex Write index for the data stream. If null, a new write index will be created. + * @param initializeFailureStore Whether the failure store should be initialized + * @return Cluster state containing the new data stream */ static ClusterState createDataStream( MetadataCreateIndexService metadataCreateIndexService, @@ -226,7 +233,8 @@ static ClusterState createDataStream( CreateDataStreamClusterStateUpdateRequest request, List backingIndices, IndexMetadata writeIndex, - ActionListener rerouteListener + ActionListener rerouteListener, + boolean initializeFailureStore ) throws Exception { String dataStreamName = request.name; SystemDataStreamDescriptor systemDataStreamDescriptor = request.getSystemDataStreamDescriptor(); @@ -274,7 +282,7 @@ static ClusterState createDataStream( // If we need to create a failure store, do so first. Do not reroute during the creation since we will do // that as part of creating the backing index if required. 
IndexMetadata failureStoreIndex = null; - if (template.getDataStreamTemplate().hasFailureStore()) { + if (template.getDataStreamTemplate().hasFailureStore() && initializeFailureStore) { if (isSystem) { throw new IllegalArgumentException("Failure stores are not supported on system data streams"); } @@ -312,7 +320,8 @@ static ClusterState createDataStream( } assert writeIndex != null; assert writeIndex.mapping() != null : "no mapping found for backing index [" + writeIndex.getIndex().getName() + "]"; - assert template.getDataStreamTemplate().hasFailureStore() == false || failureStoreIndex != null; + assert template.getDataStreamTemplate().hasFailureStore() == false || initializeFailureStore == false || failureStoreIndex != null + : "failure store should have an initial index"; assert failureStoreIndex == null || failureStoreIndex.mapping() != null : "no mapping found for failure store [" + failureStoreIndex.getIndex().getName() + "]"; @@ -328,19 +337,20 @@ static ClusterState createDataStream( List failureIndices = failureStoreIndex == null ? List.of() : List.of(failureStoreIndex.getIndex()); DataStream newDataStream = new DataStream( dataStreamName, - dsBackingIndices, initialGeneration, template.metadata() != null ? Map.copyOf(template.metadata()) : null, hidden, false, isSystem, + System::currentTimeMillis, template.getDataStreamTemplate().isAllowCustomRouting(), indexMode, lifecycle == null && isDslOnlyMode ? DataStreamLifecycle.DEFAULT : lifecycle, template.getDataStreamTemplate().hasFailureStore(), - failureIndices, - false, - null + new DataStream.DataStreamIndices(DataStream.BACKING_INDEX_PREFIX, dsBackingIndices, false, null), + // If the failure store shouldn't be initialized on data stream creation, we're marking it for "lazy rollover", which will + // initialize the failure store on first write. + new DataStream.DataStreamIndices(DataStream.FAILURE_STORE_PREFIX, failureIndices, initializeFailureStore == false, null) ); Metadata.Builder builder = Metadata.builder(currentState.metadata()).put(newDataStream); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamService.java index 56170ffb16cd3..9dbbdd597a4ce 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamService.java @@ -165,7 +165,9 @@ static ClusterState migrateToDataStream( req, backingIndices, currentState.metadata().index(writeIndex), - listener + listener, + // No need to initialize the failure store when migrating to a data stream. 
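+            // If the matching template enables a failure store, it will instead be created lazily on the first write to it.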
+ false ); } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java index c2edf9729b8b8..28c153c734b7c 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java @@ -268,17 +268,6 @@ public void testDataStreamValidation() throws IOException { exception.getMessage(), equalTo("aliases, mappings, and index settings may not be specified when rolling over a data stream") ); - - exception = expectThrows( - IllegalArgumentException.class, - () -> MetadataRolloverService.validate(metadata, randomDataStream.getName(), null, req, true) - ); - assertThat( - exception.getMessage(), - equalTo( - "unable to roll over failure store because [" + randomDataStream.getName() + "] does not have the failure store enabled" - ) - ); } public void testGenerateRolloverIndexName() { diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java index 20d826b11c1e7..4ca4e7158e454 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.bulk; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; @@ -25,7 +26,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.core.TimeValue; import org.elasticsearch.features.FeatureService; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexingPressure; @@ -146,7 +146,8 @@ void executeBulk( } @Override - void createIndex(String index, boolean requireDataStream, TimeValue timeout, ActionListener listener) { + void createIndex(CreateIndexRequest createIndexRequest, ActionListener listener) { + String index = createIndexRequest.index(); try { simulateAutoCreate.accept(index); // If we try to create an index just immediately assume it worked diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java index 52d50b3a23a0d..d7adf3aa8b4e2 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; @@ -36,7 +37,6 @@ import 
org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.core.Nullable; -import org.elasticsearch.core.TimeValue; import org.elasticsearch.features.FeatureService; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexSettings; @@ -174,7 +174,7 @@ void executeBulk( } @Override - void createIndex(String index, boolean requireDataStream, TimeValue timeout, ActionListener listener) { + void createIndex(CreateIndexRequest createIndexRequest, ActionListener listener) { indexCreated = true; listener.onResponse(null); } diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java index c27263f43eff1..1a34b1e856a5e 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.bulk.TransportBulkActionTookTests.Resolver; import org.elasticsearch.action.delete.DeleteRequest; @@ -34,7 +35,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; -import org.elasticsearch.core.TimeValue; import org.elasticsearch.features.FeatureService; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexVersion; @@ -105,7 +105,7 @@ class TestTransportBulkAction extends TransportBulkAction { } @Override - void createIndex(String index, boolean requireDataStream, TimeValue timeout, ActionListener listener) { + void createIndex(CreateIndexRequest createIndexRequest, ActionListener listener) { indexCreated = true; if (beforeIndexCreation != null) { beforeIndexCreation.run(); diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportSimulateBulkActionTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportSimulateBulkActionTests.java index 47a6a03078b9a..590029f8537f7 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportSimulateBulkActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportSimulateBulkActionTests.java @@ -11,6 +11,7 @@ import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.ingest.SimulateIndexResponse; @@ -22,7 +23,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.IndexingPressure; import org.elasticsearch.indices.EmptySystemIndices; @@ -83,7 +83,7 @@ class TestTransportSimulateBulkAction extends TransportSimulateBulkAction { } @Override - void createIndex(String index, 
boolean requireDataStream, TimeValue timeout, ActionListener listener) { + void createIndex(CreateIndexRequest createIndexRequest, ActionListener listener) { indexCreated = true; if (beforeIndexCreation != null) { beforeIndexCreation.run(); @@ -192,7 +192,7 @@ public void onFailure(Exception e) { fail(e, "Unexpected error"); } }; - Map indicesToAutoCreate = Map.of(); // unused + Map indicesToAutoCreate = Map.of(); // unused Set dataStreamsToRollover = Set.of(); // unused Set failureStoresToRollover = Set.of(); // unused long startTime = 0; diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java index 0277855db9c4c..07481a68c5176 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java @@ -353,24 +353,17 @@ public void testRemoveFailureStoreIndexThatDoesNotExist() { public void testRemoveFailureStoreWriteIndex() { DataStream original = createRandomDataStream(); + int indexToRemove = original.getFailureIndices().getIndices().size() - 1; - IllegalArgumentException e = expectThrows( - IllegalArgumentException.class, - () -> original.removeFailureStoreIndex( - original.getFailureIndices().getIndices().get(original.getFailureIndices().getIndices().size() - 1) - ) - ); - assertThat( - e.getMessage(), - equalTo( - String.format( - Locale.ROOT, - "cannot remove backing index [%s] of data stream [%s] because it is the write index of the failure store", - original.getFailureIndices().getIndices().get(original.getFailureIndices().getIndices().size() - 1).getName(), - original.getName() - ) - ) - ); + DataStream updated = original.removeFailureStoreIndex(original.getFailureIndices().getIndices().get(indexToRemove)); + assertThat(updated.getName(), equalTo(original.getName())); + assertThat(updated.getGeneration(), equalTo(original.getGeneration() + 1)); + assertThat(updated.getIndices().size(), equalTo(original.getIndices().size())); + assertThat(updated.getFailureIndices().getIndices().size(), equalTo(original.getFailureIndices().getIndices().size() - 1)); + assertThat(updated.getFailureIndices().isRolloverOnWrite(), equalTo(true)); + for (int k = 0; k < (original.getFailureIndices().getIndices().size() - 1); k++) { + assertThat(updated.getFailureIndices().getIndices().get(k), equalTo(original.getFailureIndices().getIndices().get(k))); + } } public void testAddBackingIndex() { diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamServiceTests.java index f172d0e21743d..c900c3257a405 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamServiceTests.java @@ -38,9 +38,11 @@ import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.newInstance; import static org.hamcrest.Matchers.arrayContainingInAnyOrder; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyBoolean; import 
static org.mockito.Mockito.mock; @@ -65,7 +67,8 @@ public void testCreateDataStream() throws Exception { cs, true, req, - ActionListener.noop() + ActionListener.noop(), + false ); assertThat(newState.metadata().dataStreams().size(), equalTo(1)); assertThat(newState.metadata().dataStreams().get(dataStreamName).getName(), equalTo(dataStreamName)); @@ -105,7 +108,8 @@ public void testCreateDataStreamWithAliasFromTemplate() throws Exception { cs, randomBoolean(), req, - ActionListener.noop() + ActionListener.noop(), + false ); assertThat(newState.metadata().dataStreams().size(), equalTo(1)); assertThat(newState.metadata().dataStreams().get(dataStreamName).getName(), equalTo(dataStreamName)); @@ -182,7 +186,8 @@ public void testCreateDataStreamWithAliasFromComponentTemplate() throws Exceptio cs, randomBoolean(), req, - ActionListener.noop() + ActionListener.noop(), + false ); assertThat(newState.metadata().dataStreams().size(), equalTo(1)); assertThat(newState.metadata().dataStreams().get(dataStreamName).getName(), equalTo(dataStreamName)); @@ -219,7 +224,7 @@ private static AliasMetadata randomAlias(String prefix) { return builder.build(); } - public void testCreateDataStreamWithFailureStore() throws Exception { + public void testCreateDataStreamWithFailureStoreInitialized() throws Exception { final MetadataCreateIndexService metadataCreateIndexService = getMetadataCreateIndexService(); final String dataStreamName = "my-data-stream"; ComposableIndexTemplate template = new ComposableIndexTemplate.Builder().indexPatterns(List.of(dataStreamName + "*")) @@ -235,7 +240,8 @@ public void testCreateDataStreamWithFailureStore() throws Exception { cs, randomBoolean(), req, - ActionListener.noop() + ActionListener.noop(), + true ); var backingIndexName = DataStream.getDefaultBackingIndexName(dataStreamName, 1, req.getStartTime()); var failureStoreIndexName = DataStream.getDefaultFailureStoreName(dataStreamName, 1, req.getStartTime()); @@ -252,6 +258,39 @@ public void testCreateDataStreamWithFailureStore() throws Exception { assertThat(newState.metadata().index(failureStoreIndexName).isSystem(), is(false)); } + public void testCreateDataStreamWithFailureStoreUninitialized() throws Exception { + final MetadataCreateIndexService metadataCreateIndexService = getMetadataCreateIndexService(); + final String dataStreamName = "my-data-stream"; + ComposableIndexTemplate template = new ComposableIndexTemplate.Builder().indexPatterns(List.of(dataStreamName + "*")) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false, true)) + .build(); + ClusterState cs = ClusterState.builder(new ClusterName("_name")) + .metadata(Metadata.builder().put("template", template).build()) + .build(); + CreateDataStreamClusterStateUpdateRequest req = new CreateDataStreamClusterStateUpdateRequest(dataStreamName); + ClusterState newState = MetadataCreateDataStreamService.createDataStream( + metadataCreateIndexService, + Settings.EMPTY, + cs, + randomBoolean(), + req, + ActionListener.noop(), + false + ); + var backingIndexName = DataStream.getDefaultBackingIndexName(dataStreamName, 1, req.getStartTime()); + var failureStoreIndexName = DataStream.getDefaultFailureStoreName(dataStreamName, 1, req.getStartTime()); + assertThat(newState.metadata().dataStreams().size(), equalTo(1)); + assertThat(newState.metadata().dataStreams().get(dataStreamName).getName(), equalTo(dataStreamName)); + assertThat(newState.metadata().dataStreams().get(dataStreamName).isSystem(), is(false)); + 
assertThat(newState.metadata().dataStreams().get(dataStreamName).isHidden(), is(false)); + assertThat(newState.metadata().dataStreams().get(dataStreamName).isReplicated(), is(false)); + assertThat(newState.metadata().dataStreams().get(dataStreamName).getFailureIndices().getIndices(), empty()); + assertThat(newState.metadata().index(backingIndexName), notNullValue()); + assertThat(newState.metadata().index(backingIndexName).getSettings().get("index.hidden"), equalTo("true")); + assertThat(newState.metadata().index(backingIndexName).isSystem(), is(false)); + assertThat(newState.metadata().index(failureStoreIndexName), nullValue()); + } + public void testCreateDataStreamWithFailureStoreWithRefreshRate() throws Exception { final MetadataCreateIndexService metadataCreateIndexService = getMetadataCreateIndexService(); var timeValue = randomTimeValue(); @@ -272,7 +311,8 @@ public void testCreateDataStreamWithFailureStoreWithRefreshRate() throws Excepti cs, randomBoolean(), req, - ActionListener.noop() + ActionListener.noop(), + true ); var backingIndexName = DataStream.getDefaultBackingIndexName(dataStreamName, 1, req.getStartTime()); var failureStoreIndexName = DataStream.getDefaultFailureStoreName(dataStreamName, 1, req.getStartTime()); @@ -303,7 +343,8 @@ public void testCreateSystemDataStream() throws Exception { cs, randomBoolean(), req, - ActionListener.noop() + ActionListener.noop(), + false ); assertThat(newState.metadata().dataStreams().size(), equalTo(1)); assertThat(newState.metadata().dataStreams().get(dataStreamName).getName(), equalTo(dataStreamName)); @@ -336,7 +377,8 @@ public void testCreateDuplicateDataStream() throws Exception { cs, randomBoolean(), req, - ActionListener.noop() + ActionListener.noop(), + false ) ); assertThat(e.getMessage(), containsString("data_stream [" + dataStreamName + "] already exists")); @@ -355,7 +397,8 @@ public void testCreateDataStreamWithInvalidName() throws Exception { cs, randomBoolean(), req, - ActionListener.noop() + ActionListener.noop(), + false ) ); assertThat(e.getMessage(), containsString("must not contain the following characters")); @@ -374,7 +417,8 @@ public void testCreateDataStreamWithUppercaseCharacters() throws Exception { cs, randomBoolean(), req, - ActionListener.noop() + ActionListener.noop(), + false ) ); assertThat(e.getMessage(), containsString("data_stream [" + dataStreamName + "] must be lowercase")); @@ -393,7 +437,8 @@ public void testCreateDataStreamStartingWithPeriod() throws Exception { cs, randomBoolean(), req, - ActionListener.noop() + ActionListener.noop(), + false ) ); assertThat(e.getMessage(), containsString("data_stream [" + dataStreamName + "] must not start with '.ds-'")); @@ -412,7 +457,8 @@ public void testCreateDataStreamNoTemplate() throws Exception { cs, randomBoolean(), req, - ActionListener.noop() + ActionListener.noop(), + false ) ); assertThat(e.getMessage(), equalTo("no matching index template found for data stream [my-data-stream]")); @@ -434,7 +480,8 @@ public void testCreateDataStreamNoValidTemplate() throws Exception { cs, randomBoolean(), req, - ActionListener.noop() + ActionListener.noop(), + false ) ); assertThat( @@ -459,7 +506,8 @@ public static ClusterState createDataStream(final String dataStreamName) throws cs, randomBoolean(), req, - ActionListener.noop() + ActionListener.noop(), + false ); } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java 
b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java index ed7587556bd28..560c98fbd210b 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java @@ -334,6 +334,9 @@ static DataStream updateLocalDataStream( // (and potentially even break things). remoteDataStream.getBackingIndices().copy().setIndices(List.of(backingIndexToFollow)).setRolloverOnWrite(false).build() ) + // Replicated data streams should not have the failure store marked for lazy rollover (which they do by default for lazy + // failure store creation). + .setFailureIndices(remoteDataStream.getFailureIndices().copy().setRolloverOnWrite(false).build()) .setReplicated(true) .build(); } else { diff --git a/x-pack/plugin/core/src/javaRestTest/java/org/elasticsearch/xpack/core/DataStreamRestIT.java b/x-pack/plugin/core/src/javaRestTest/java/org/elasticsearch/xpack/core/DataStreamRestIT.java index 083850e80dd47..4ff7a149bc8f0 100644 --- a/x-pack/plugin/core/src/javaRestTest/java/org/elasticsearch/xpack/core/DataStreamRestIT.java +++ b/x-pack/plugin/core/src/javaRestTest/java/org/elasticsearch/xpack/core/DataStreamRestIT.java @@ -74,6 +74,9 @@ public void testDSXpackUsage() throws Exception { indexRequest = new Request("POST", "/fs/_doc"); indexRequest.setJsonEntity("{\"@timestamp\": \"2020-01-01\"}"); client().performRequest(indexRequest); + // Initialize the failure store + rollover = new Request("POST", "/fs/_rollover?target_failure_store=true"); + client().performRequest(rollover); dataStreams = (Map) getLocation("/_xpack/usage").get("data_streams"); assertNotNull(dataStreams); From 06edab808a95318ba0ccdd3dd5e00d3904ecf294 Mon Sep 17 00:00:00 2001 From: Chris Hegarty <62058229+ChrisHegarty@users.noreply.github.com> Date: Wed, 19 Jun 2024 12:28:59 +0100 Subject: [PATCH 25/44] AwaitsFix: https://github.com/elastic/elasticsearch/issues/109915 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 2bd400c47a7aa..86ddd0b26e925 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -78,6 +78,9 @@ tests: method: "testFetchAllEntities" - class: "org.elasticsearch.xpack.ml.integration.AutodetectMemoryLimitIT" issue: "https://github.com/elastic/elasticsearch/issues/109904" +- class: "org.elasticsearch.compute.operator.topn.TopNOperatorTests" + issue: "https://github.com/elastic/elasticsearch/issues/109915" + method: "testRandomMultiValuesTopN" # Examples: # From 3dcc08e89693e613e9ff2c73ad74db9b63686600 Mon Sep 17 00:00:00 2001 From: Tim Grein Date: Wed, 19 Jun 2024 14:21:36 +0200 Subject: [PATCH 26/44] [Inference API] Use random task type in random model configuration (#109861) --- .../xpack/inference/ModelConfigurationsTests.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/ModelConfigurationsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/ModelConfigurationsTests.java index d52595a5899a8..5afae297b3592 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/ModelConfigurationsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/ModelConfigurationsTests.java @@ -20,8 +20,7 @@ public class ModelConfigurationsTests extends AbstractWireSerializingTestCase { public static ModelConfigurations 
createRandomInstance() { - // TODO randomise task types and settings - var taskType = TaskType.SPARSE_EMBEDDING; + var taskType = randomFrom(TaskType.values()); return new ModelConfigurations( randomAlphaOfLength(6), taskType, From 6c8cebcdb5c615d3729185537f1e21fc5af62315 Mon Sep 17 00:00:00 2001 From: Tim Grein Date: Wed, 19 Jun 2024 14:21:54 +0200 Subject: [PATCH 27/44] [Inference API] Use randomOtherValueThan to avoid flaky test (#109859) --- .../inference/action/PutInferenceModelResponseTests.java | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/PutInferenceModelResponseTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/PutInferenceModelResponseTests.java index 89bd0247a9ccf..d88aa91ff5148 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/PutInferenceModelResponseTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/PutInferenceModelResponseTests.java @@ -23,8 +23,10 @@ protected PutInferenceModelAction.Response createTestInstance() { @Override protected PutInferenceModelAction.Response mutateInstance(PutInferenceModelAction.Response instance) { - var mutatedModel = ModelConfigurationsTests.mutateTestInstance(instance.getModel()); - return new PutInferenceModelAction.Response(mutatedModel); + return randomValueOtherThan(instance, () -> { + var mutatedModel = ModelConfigurationsTests.mutateTestInstance(instance.getModel()); + return new PutInferenceModelAction.Response(mutatedModel); + }); } @Override From 3301ceffec699e8ce6abd994b3e5ad66ca585d53 Mon Sep 17 00:00:00 2001 From: Tim Grein Date: Wed, 19 Jun 2024 14:23:03 +0200 Subject: [PATCH 28/44] [Inference API] Use randomOtherValueThan to avoid flaky test (#109857) --- .../elasticsearch/xpack/inference/ModelSecretsTests.java | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/ModelSecretsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/ModelSecretsTests.java index ac7fc6ba56952..d6d139190c12c 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/ModelSecretsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/ModelSecretsTests.java @@ -27,10 +27,6 @@ public static ModelSecrets createRandomInstance() { return new ModelSecrets(randomSecretSettings()); } - public static ModelSecrets mutateTestInstance(ModelSecrets instance) { - return createRandomInstance(); - } - private static SecretSettings randomSecretSettings() { return new FakeSecretSettings(randomAlphaOfLengthBetween(8, 10)); } @@ -54,7 +50,7 @@ protected ModelSecrets createTestInstance() { @Override protected ModelSecrets mutateInstance(ModelSecrets instance) { - return mutateTestInstance(instance); + return randomValueOtherThan(instance, ModelSecretsTests::createRandomInstance); } public record FakeSecretSettings(String apiKey) implements SecretSettings { From c900743ba8406d145b7e9dedcf4e848d775f793b Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Wed, 19 Jun 2024 08:41:56 -0400 Subject: [PATCH 29/44] ESQL Move serialization of MV_ functions (#109892) This moves the serialization of the `MV_` functions from `PlanNamedTypes` to their `NamedWriteable` to line up better with the way the rest of Elasticsearch works. 
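To make the pattern concrete, here is a rough sketch of what each moved function now contributes (hand-written from the MvAppend and AbstractMultivalueFunction hunks below, details abbreviated):

    // Registry entry: a stable wire name plus a reference to the deserializing constructor.
    public static final NamedWriteableRegistry.Entry ENTRY =
        new NamedWriteableRegistry.Entry(Expression.class, "MvAppend", MvAppend::new);

    @Override
    public String getWriteableName() {
        return ENTRY.name; // the name the other side of the wire uses to look up the reader
    }

For the unary multivalue functions the StreamInput constructor and writeTo live once in AbstractMultivalueFunction, so most subclasses only add their ENTRY and getWriteableName().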
Co-authored-by: Elastic Machine --- .../function/scalar/BinaryScalarFunction.java | 20 +++ .../AbstractMultivalueFunction.java | 37 ++++++ .../function/scalar/multivalue/MvAppend.java | 23 ++++ .../function/scalar/multivalue/MvAvg.java | 14 +++ .../function/scalar/multivalue/MvConcat.java | 14 +++ .../function/scalar/multivalue/MvCount.java | 14 +++ .../function/scalar/multivalue/MvDedupe.java | 14 +++ .../function/scalar/multivalue/MvFirst.java | 14 +++ .../function/scalar/multivalue/MvLast.java | 14 +++ .../function/scalar/multivalue/MvMax.java | 14 +++ .../function/scalar/multivalue/MvMedian.java | 14 +++ .../function/scalar/multivalue/MvMin.java | 14 +++ .../function/scalar/multivalue/MvSlice.java | 46 ++++++- .../function/scalar/multivalue/MvSort.java | 48 +++++++- .../function/scalar/multivalue/MvSum.java | 14 +++ .../function/scalar/multivalue/MvZip.java | 53 +++++++- .../arithmetic/EsqlArithmeticOperation.java | 8 -- .../InsensitiveBinaryComparison.java | 12 +- .../xpack/esql/io/stream/PlanNamedTypes.java | 114 +----------------- .../AbstractExpressionSerializationTests.java | 13 +- .../AbstractMvSerializationTests.java | 21 ++++ .../MvAppendSerializationTests.java | 42 +++++++ .../multivalue/MvAvgSerializationTests.java | 29 +++++ .../MvConcatSerializationTests.java | 37 ++++++ .../multivalue/MvCountSerializationTests.java | 29 +++++ .../MvDedupeSerializationTests.java | 29 +++++ .../multivalue/MvFirstSerializationTests.java | 29 +++++ .../multivalue/MvLastSerializationTests.java | 29 +++++ .../multivalue/MvMaxSerializationTests.java | 29 +++++ .../MvMedianSerializationTests.java | 29 +++++ .../multivalue/MvMinSerializationTests.java | 29 +++++ .../multivalue/MvSliceSerializationTests.java | 44 +++++++ .../multivalue/MvSortSerializationTests.java | 37 ++++++ .../multivalue/MvSumSerializationTests.java | 29 +++++ .../multivalue/MvZipSerializationTests.java | 44 +++++++ 35 files changed, 863 insertions(+), 137 deletions(-) create mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMvSerializationTests.java create mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAppendSerializationTests.java create mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgSerializationTests.java create mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvConcatSerializationTests.java create mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvCountSerializationTests.java create mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvDedupeSerializationTests.java create mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirstSerializationTests.java create mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLastSerializationTests.java create mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxSerializationTests.java create mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianSerializationTests.java create mode 100644 
x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinSerializationTests.java create mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceSerializationTests.java create mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSortSerializationTests.java create mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSumSerializationTests.java create mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvZipSerializationTests.java diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/scalar/BinaryScalarFunction.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/scalar/BinaryScalarFunction.java index f96aeb693b52a..4b462719a375b 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/scalar/BinaryScalarFunction.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/scalar/BinaryScalarFunction.java @@ -6,9 +6,14 @@ */ package org.elasticsearch.xpack.esql.core.expression.function.scalar; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.util.PlanStreamInput; +import org.elasticsearch.xpack.esql.core.util.PlanStreamOutput; +import java.io.IOException; import java.util.Arrays; import java.util.List; @@ -22,6 +27,21 @@ protected BinaryScalarFunction(Source source, Expression left, Expression right) this.right = right; } + protected BinaryScalarFunction(StreamInput in) throws IOException { + this( + Source.readFrom((StreamInput & PlanStreamInput) in), + ((PlanStreamInput) in).readExpression(), + ((PlanStreamInput) in).readExpression() + ); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + source().writeTo(out); + ((PlanStreamOutput) out).writeExpression(left()); + ((PlanStreamOutput) out).writeExpression(right()); + } + @Override public final BinaryScalarFunction replaceChildren(List newChildren) { Expression newLeft = newChildren.get(0); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunction.java index 5aa6dad7b2a5b..9b7e0b729cde9 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunction.java @@ -7,6 +7,9 @@ package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.operator.DriverContext; @@ -15,7 +18,12 @@ import 
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunction.java
index 5aa6dad7b2a5b..9b7e0b729cde9 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunction.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunction.java
@@ -7,6 +7,9 @@
 
 package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue;
 
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.compute.data.Block;
 import org.elasticsearch.compute.data.Page;
 import org.elasticsearch.compute.operator.DriverContext;
@@ -15,7 +18,12 @@ import org.elasticsearch.core.Releasables;
 import org.elasticsearch.xpack.esql.core.expression.Expression;
 import org.elasticsearch.xpack.esql.core.tree.Source;
+import org.elasticsearch.xpack.esql.core.util.PlanStreamOutput;
 import org.elasticsearch.xpack.esql.expression.function.scalar.UnaryScalarFunction;
+import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput;
+
+import java.io.IOException;
+import java.util.List;
 
 /**
  * Base class for functions that reduce multivalued fields into single valued fields.
@@ -25,10 +33,39 @@ *
 
  */
 public abstract class AbstractMultivalueFunction extends UnaryScalarFunction {
+    public static List getNamedWriteables() {
+        return List.of(
+            MvAppend.ENTRY,
+            MvAvg.ENTRY,
+            MvConcat.ENTRY,
+            MvCount.ENTRY,
+            MvDedupe.ENTRY,
+            MvFirst.ENTRY,
+            MvLast.ENTRY,
+            MvMax.ENTRY,
+            MvMedian.ENTRY,
+            MvMin.ENTRY,
+            MvSlice.ENTRY,
+            MvSort.ENTRY,
+            MvSum.ENTRY,
+            MvZip.ENTRY
+        );
+    }
+
     protected AbstractMultivalueFunction(Source source, Expression field) {
         super(source, field);
     }
 
+    protected AbstractMultivalueFunction(StreamInput in) throws IOException {
+        this(Source.readFrom((PlanStreamInput) in), ((PlanStreamInput) in).readExpression());
+    }
+
+    @Override
+    public final void writeTo(StreamOutput out) throws IOException {
+        Source.EMPTY.writeTo(out);
+        ((PlanStreamOutput) out).writeExpression(field);
+    }
+
     /**
      * Build the evaluator given the evaluator a multivalued field.
      */
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAppend.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAppend.java
index 1f37c15ecfc43..99844d40e0565 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAppend.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAppend.java
@@ -8,6 +8,9 @@
 package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue;
 
 import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.compute.ann.Evaluator;
 import org.elasticsearch.compute.data.BooleanBlock;
 import org.elasticsearch.compute.data.BytesRefBlock;
@@ -25,9 +28,11 @@
 import org.elasticsearch.xpack.esql.expression.function.FunctionInfo;
 import org.elasticsearch.xpack.esql.expression.function.Param;
 import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction;
+import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput;
 import org.elasticsearch.xpack.esql.planner.PlannerUtils;
 import org.elasticsearch.xpack.esql.type.EsqlDataTypes;
 
+import java.io.IOException;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Objects;
@@ -41,6 +46,8 @@
  * Appends values to a multi-value
  */
 public class MvAppend extends EsqlScalarFunction implements EvaluatorMapper {
+    public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "MvAppend", MvAppend::new);
+
     private final Expression field1, field2;
 
     private DataType dataType;
@@ -103,6 +110,22 @@ public MvAppend(
         this.field2 = field2;
     }
 
+    private MvAppend(StreamInput in) throws IOException {
+        this(Source.readFrom((PlanStreamInput) in), ((PlanStreamInput) in).readExpression(), ((PlanStreamInput) in).readExpression());
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        Source.EMPTY.writeTo(out);
+        out.writeNamedWriteable(field1);
+        out.writeNamedWriteable(field2);
+    }
+
+    @Override
+    public String getWriteableName() {
+        return ENTRY.name;
+    }
+
     @Override
     protected TypeResolution resolveType() {
         if (childrenResolved() == false) {
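MvAppend above shows the full recipe each function now follows: a static ENTRY pairing a unique wire name with a reading constructor, a getWriteableName() returning that name, and registration through the list that AbstractMultivalueFunction.getNamedWriteables() returns. A minimal self-contained model of the name-to-reader dispatch behind that pattern (illustrative names only, not the Elasticsearch API):

    import java.util.List;
    import java.util.Map;
    import java.util.function.Function;
    import java.util.stream.Collectors;

    // Toy model of NamedWriteableRegistry dispatch: the wire carries a name,
    // and the registry maps that name to a reader that reconstructs the value.
    public class ToyNamedRegistry {
        record Entry(String name, Function<String, String> reader) {}

        // Analogous to the per-class ENTRY constants collected by
        // AbstractMultivalueFunction.getNamedWriteables().
        static final List<Entry> ENTRIES = List.of(
            new Entry("MvAvg", payload -> "avg(" + payload + ")"),
            new Entry("MvSum", payload -> "sum(" + payload + ")")
        );

        static final Map<String, Function<String, String>> BY_NAME =
            ENTRIES.stream().collect(Collectors.toMap(Entry::name, Entry::reader));

        // Reading side: look the reader up by the name that was serialized,
        // which is why each ENTRY name must be globally unique.
        static String read(String name, String payload) {
            return BY_NAME.get(name).apply(payload);
        }

        public static void main(String[] args) {
            System.out.println(read("MvAvg", "field")); // avg(field)
        }
    }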
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvg.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvg.java
index 787bf3e5efd1c..01f24365be225 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvg.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvg.java
@@ -7,6 +7,8 @@
 
 package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue;
 
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.compute.ann.MvEvaluator;
 import org.elasticsearch.compute.operator.EvalOperator;
 import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator;
@@ -21,6 +23,7 @@
 import org.elasticsearch.xpack.esql.expression.function.Param;
 import org.elasticsearch.xpack.esql.planner.PlannerUtils;
 
+import java.io.IOException;
 import java.util.List;
 
 import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isType;
@@ -31,6 +34,8 @@
  * Reduce a multivalued field to a single valued field containing the average value.
  */
 public class MvAvg extends AbstractMultivalueFunction {
+    public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "MvAvg", MvAvg::new);
+
     @FunctionInfo(
         returnType = "double",
         description = "Converts a multivalued field into a single valued field containing the average of all of the values.",
@@ -47,6 +52,15 @@ public MvAvg(
         super(source, field);
     }
 
+    private MvAvg(StreamInput in) throws IOException {
+        super(in);
+    }
+
+    @Override
+    public String getWriteableName() {
+        return ENTRY.name;
+    }
+
     @Override
     protected TypeResolution resolveFieldType() {
         return isType(field(), t -> t.isNumeric() && isRepresentable(t), sourceText(), null, "numeric");
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvConcat.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvConcat.java
index 3e37a739147cf..fa9475055515f 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvConcat.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvConcat.java
@@ -9,6 +9,8 @@
 
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.compute.data.Block;
 import org.elasticsearch.compute.data.BytesRefBlock;
 import org.elasticsearch.compute.data.Page;
@@ -25,6 +27,7 @@
 import org.elasticsearch.xpack.esql.expression.function.FunctionInfo;
 import org.elasticsearch.xpack.esql.expression.function.Param;
 
+import java.io.IOException;
 import java.util.function.Function;
 
 import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isString;
@@ -33,6 +36,8 @@
  * Reduce a multivalued string field to a single valued field by concatenating all values.
  */
 public class MvConcat extends BinaryScalarFunction implements EvaluatorMapper {
+    public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "MvConcat", MvConcat::new);
+
     @FunctionInfo(
         returnType = "keyword",
         description = "Converts a multivalued string expression into a single valued column "
@@ -53,6 +58,15 @@ public MvConcat(
         super(source, field, delim);
     }
 
+    private MvConcat(StreamInput in) throws IOException {
+        super(in);
+    }
+
+    @Override
+    public String getWriteableName() {
+        return ENTRY.name;
+    }
+
     @Override
     protected TypeResolution resolveType() {
         if (childrenResolved() == false) {
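Unlike its siblings, MvConcat extends BinaryScalarFunction rather than AbstractMultivalueFunction, so its private MvConcat(StreamInput) constructor can delegate straight to super(in) and it inherits the writeTo added to the base class at the top of this patch. One observable consequence: that base implementation writes a real source(), which appears to be why MvConcatSerializationTests further down does not override alwaysEmptySource() while most of the other new test classes do.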
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvCount.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvCount.java
index b2afef4f2235e..faf7d36e4a24c 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvCount.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvCount.java
@@ -7,6 +7,8 @@
 
 package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue;
 
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.compute.data.Block;
 import org.elasticsearch.compute.operator.DriverContext;
 import org.elasticsearch.compute.operator.EvalOperator;
@@ -20,6 +22,7 @@
 import org.elasticsearch.xpack.esql.expression.function.Param;
 import org.elasticsearch.xpack.esql.type.EsqlDataTypes;
 
+import java.io.IOException;
 import java.util.List;
 
 import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isType;
@@ -28,6 +31,8 @@
  * Reduce a multivalued field to a single valued field containing the count of values.
  */
 public class MvCount extends AbstractMultivalueFunction {
+    public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "MvCount", MvCount::new);
+
     @FunctionInfo(
         returnType = "integer",
         description = "Converts a multivalued expression into a single valued column containing a count of the number of values.",
@@ -58,6 +63,15 @@ public MvCount(
         super(source, v);
     }
 
+    private MvCount(StreamInput in) throws IOException {
+        super(in);
+    }
+
+    @Override
+    public String getWriteableName() {
+        return ENTRY.name;
+    }
+
     @Override
     protected TypeResolution resolveFieldType() {
         return isType(field(), EsqlDataTypes::isRepresentable, sourceText(), null, "representable");
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvDedupe.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvDedupe.java
index 71cf759b3dbe5..d17bc26ab808b 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvDedupe.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvDedupe.java
@@ -7,6 +7,8 @@
 
 package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue;
 
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator;
 import org.elasticsearch.compute.operator.mvdedupe.MultivalueDedupe;
 import org.elasticsearch.xpack.esql.core.expression.Expression;
@@ -18,6 +20,7 @@
 import org.elasticsearch.xpack.esql.planner.PlannerUtils;
 import org.elasticsearch.xpack.esql.type.EsqlDataTypes;
 
+import java.io.IOException;
 import java.util.List;
 
 import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isType;
@@ -26,6 +29,8 @@
  * Removes duplicate values from a multivalued field.
  */
 public class MvDedupe extends AbstractMultivalueFunction {
+    public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "MvDedupe", MvDedupe::new);
+
     // @TODO: add unsigned_long
     @FunctionInfo(
         returnType = {
@@ -70,6 +75,15 @@ public MvDedupe(
         super(source, field);
     }
 
+    private MvDedupe(StreamInput in) throws IOException {
+        super(in);
+    }
+
+    @Override
+    public String getWriteableName() {
+        return ENTRY.name;
+    }
+
     @Override
     protected TypeResolution resolveFieldType() {
         return isType(field(), EsqlDataTypes::isRepresentable, sourceText(), null, "representable");
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirst.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirst.java
index a985c10824ae7..25e6a85a485c1 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirst.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirst.java
@@ -8,6 +8,8 @@
 package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue;
 
 import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.compute.ann.MvEvaluator;
 import org.elasticsearch.compute.data.BooleanBlock;
 import org.elasticsearch.compute.data.BytesRefBlock;
@@ -26,6 +28,7 @@
 import org.elasticsearch.xpack.esql.planner.PlannerUtils;
 import org.elasticsearch.xpack.esql.type.EsqlDataTypes;
 
+import java.io.IOException;
 import java.util.List;
 
 import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isType;
@@ -34,6 +37,8 @@
  * Reduce a multivalued field to a single valued field containing the minimum value.
  */
 public class MvFirst extends AbstractMultivalueFunction {
+    public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "MvFirst", MvFirst::new);
+
     @FunctionInfo(
         returnType = {
             "boolean",
@@ -87,6 +92,15 @@ public MvFirst(
         super(source, field);
     }
 
+    private MvFirst(StreamInput in) throws IOException {
+        super(in);
+    }
+
+    @Override
+    public String getWriteableName() {
+        return ENTRY.name;
+    }
+
     @Override
     protected TypeResolution resolveFieldType() {
         return isType(field(), EsqlDataTypes::isRepresentable, sourceText(), null, "representable");
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLast.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLast.java
index 8dcc4c8b1222e..2a9a498ecf9d3 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLast.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLast.java
@@ -8,6 +8,8 @@
 package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue;
 
 import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.compute.ann.MvEvaluator;
 import org.elasticsearch.compute.data.BooleanBlock;
 import org.elasticsearch.compute.data.BytesRefBlock;
@@ -26,6 +28,7 @@
 import org.elasticsearch.xpack.esql.planner.PlannerUtils;
 import org.elasticsearch.xpack.esql.type.EsqlDataTypes;
 
+import java.io.IOException;
 import java.util.List;
 
 import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isType;
@@ -34,6 +37,8 @@
  * Reduce a multivalued field to a single valued field containing the minimum value.
  */
 public class MvLast extends AbstractMultivalueFunction {
+    public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "MvLast", MvLast::new);
+
     @FunctionInfo(
         returnType = {
             "boolean",
@@ -87,6 +92,15 @@ public MvLast(
         super(source, field);
     }
 
+    private MvLast(StreamInput in) throws IOException {
+        super(in);
+    }
+
+    @Override
+    public String getWriteableName() {
+        return ENTRY.name;
+    }
+
     @Override
     protected TypeResolution resolveFieldType() {
         return isType(field(), EsqlDataTypes::isRepresentable, sourceText(), null, "representable");
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMax.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMax.java
index 7cfc4a94b35d4..24873cc1da2e9 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMax.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMax.java
@@ -8,6 +8,8 @@
 package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue;
 
 import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.compute.ann.MvEvaluator;
 import org.elasticsearch.compute.operator.EvalOperator;
 import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator;
@@ -20,6 +22,7 @@
 import org.elasticsearch.xpack.esql.expression.function.Param;
 import org.elasticsearch.xpack.esql.planner.PlannerUtils;
 
+import java.io.IOException;
 import java.util.List;
 
 import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isType;
@@ -30,6 +33,8 @@
  * Reduce a multivalued field to a single valued field containing the maximum value.
  */
 public class MvMax extends AbstractMultivalueFunction {
+    public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "MvMax", MvMax::new);
+
     @FunctionInfo(
         returnType = { "boolean", "date", "double", "integer", "ip", "keyword", "long", "text", "unsigned_long", "version" },
         description = "Converts a multivalued expression into a single valued column containing the maximum value.",
@@ -53,6 +58,15 @@ public MvMax(
         super(source, v);
     }
 
+    private MvMax(StreamInput in) throws IOException {
+        super(in);
+    }
+
+    @Override
+    public String getWriteableName() {
+        return ENTRY.name;
+    }
+
     @Override
     protected TypeResolution resolveFieldType() {
         return isType(field(), t -> isSpatial(t) == false && isRepresentable(t), sourceText(), null, "representableNonSpatial");
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedian.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedian.java
index 8d3177926f2e6..4e7d6dd4e29b2 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedian.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedian.java
@@ -8,6 +8,8 @@
 package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue;
 
 import org.apache.lucene.util.ArrayUtil;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.compute.ann.MvEvaluator;
 import org.elasticsearch.compute.data.DoubleBlock;
 import org.elasticsearch.compute.data.IntBlock;
@@ -23,6 +25,7 @@
 import org.elasticsearch.xpack.esql.expression.function.Param;
 import org.elasticsearch.xpack.esql.planner.PlannerUtils;
 
+import java.io.IOException;
 import java.math.BigInteger;
 import java.util.Arrays;
 import java.util.List;
@@ -36,6 +39,8 @@
  * Reduce a multivalued field to a single valued field containing the average value.
  */
 public class MvMedian extends AbstractMultivalueFunction {
+    public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "MvMedian", MvMedian::new);
+
     @FunctionInfo(
         returnType = { "double", "integer", "long", "unsigned_long" },
         description = "Converts a multivalued field into a single valued field containing the median value.",
@@ -60,6 +65,15 @@ public MvMedian(
         super(source, field);
     }
 
+    private MvMedian(StreamInput in) throws IOException {
+        super(in);
+    }
+
+    @Override
+    public String getWriteableName() {
+        return ENTRY.name;
+    }
+
     @Override
     protected TypeResolution resolveFieldType() {
         return isType(field(), t -> t.isNumeric() && isRepresentable(t), sourceText(), null, "numeric");
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMin.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMin.java
index e52e72c766a3d..205a09953fde3 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMin.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMin.java
@@ -8,6 +8,8 @@
 package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue;
 
 import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.compute.ann.MvEvaluator;
 import org.elasticsearch.compute.operator.EvalOperator;
 import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator;
@@ -20,6 +22,7 @@
 import org.elasticsearch.xpack.esql.expression.function.Param;
 import org.elasticsearch.xpack.esql.planner.PlannerUtils;
 
+import java.io.IOException;
 import java.util.List;
 
 import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isType;
@@ -30,6 +33,8 @@
  * Reduce a multivalued field to a single valued field containing the minimum value.
  */
 public class MvMin extends AbstractMultivalueFunction {
+    public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "MvMin", MvMin::new);
+
     @FunctionInfo(
         returnType = { "boolean", "date", "double", "integer", "ip", "keyword", "long", "text", "unsigned_long", "version" },
         description = "Converts a multivalued expression into a single valued column containing the minimum value.",
@@ -53,6 +58,15 @@ public MvMin(
         super(source, field);
     }
 
+    private MvMin(StreamInput in) throws IOException {
+        super(in);
+    }
+
+    @Override
+    public String getWriteableName() {
+        return ENTRY.name;
+    }
+
     @Override
     protected TypeResolution resolveFieldType() {
         return isType(field(), t -> isSpatial(t) == false && isRepresentable(t), sourceText(), null, "representableNonSpatial");
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSlice.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSlice.java
index 40e9f90df9dc6..f824d0821cfbf 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSlice.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSlice.java
@@ -8,6 +8,9 @@
 package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue;
 
 import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.compute.ann.Evaluator;
 import org.elasticsearch.compute.data.BooleanBlock;
 import org.elasticsearch.compute.data.BytesRefBlock;
@@ -23,14 +26,17 @@
 import org.elasticsearch.xpack.esql.core.tree.NodeInfo;
 import org.elasticsearch.xpack.esql.core.tree.Source;
 import org.elasticsearch.xpack.esql.core.type.DataType;
+import org.elasticsearch.xpack.esql.core.util.PlanStreamOutput;
 import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper;
 import org.elasticsearch.xpack.esql.expression.function.Example;
 import org.elasticsearch.xpack.esql.expression.function.FunctionInfo;
 import org.elasticsearch.xpack.esql.expression.function.Param;
 import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction;
+import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput;
 import org.elasticsearch.xpack.esql.planner.PlannerUtils;
 import org.elasticsearch.xpack.esql.type.EsqlDataTypes;
 
+import java.io.IOException;
 import java.util.Arrays;
 import java.util.List;
 import java.util.function.Function;
@@ -46,6 +52,8 @@
  * Returns a subset of the multivalued field using the start and end index values.
  */
 public class MvSlice extends EsqlScalarFunction implements OptionalArgument, EvaluatorMapper {
+    public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "MvSlice", MvSlice::new);
+
     private final Expression field, start, end;
 
     @FunctionInfo(
@@ -103,7 +111,43 @@ public MvSlice(
         super(source, end == null ? Arrays.asList(field, start, start) : Arrays.asList(field, start, end));
         this.field = field;
         this.start = start;
-        this.end = end == null ? start : end;
+        this.end = end;
     }
 
+    private MvSlice(StreamInput in) throws IOException {
+        this(
+            Source.readFrom((PlanStreamInput) in),
+            ((PlanStreamInput) in).readExpression(),
+            ((PlanStreamInput) in).readExpression(),
+            // TODO readOptionalNamedWriteable
+            in.readOptionalWriteable(i -> ((PlanStreamInput) i).readExpression())
+        );
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        Source.EMPTY.writeTo(out);
+        ((PlanStreamOutput) out).writeExpression(field);
+        ((PlanStreamOutput) out).writeExpression(start);
+        // TODO writeOptionalNamedWriteable
+        out.writeOptionalWriteable(end == null ? null : o -> ((PlanStreamOutput) o).writeExpression(end));
+    }
+
+    @Override
+    public String getWriteableName() {
+        return ENTRY.name;
+    }
+
+    Expression field() {
+        return field;
+    }
+
+    Expression start() {
+        return start;
+    }
+
+    Expression end() {
+        return end;
+    }
 
     @Override
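MvSlice is the first function in this patch with a nullable child (`end`), which is why its reader and writer go through readOptionalWriteable/writeOptionalWriteable rather than the plain expression calls. The underlying framing is a presence flag followed by the value only when present; a self-contained toy version of that framing (plain java.io, illustrative only):

    import java.io.*;

    // Toy model of optional-value framing: one boolean presence flag, then
    // the value only if present, mirroring write/readOptionalWriteable.
    public class ToyOptional {
        static void writeOptional(DataOutputStream out, String value) throws IOException {
            out.writeBoolean(value != null); // presence flag
            if (value != null) {
                out.writeUTF(value);
            }
        }

        static String readOptional(DataInputStream in) throws IOException {
            return in.readBoolean() ? in.readUTF() : null;
        }

        public static void main(String[] args) throws IOException {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            DataOutputStream out = new DataOutputStream(bytes);
            writeOptional(out, null);  // e.g. MvSlice built without an explicit end
            writeOptional(out, "end"); // e.g. MvSlice with all three arguments
            DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
            System.out.println(readOptional(in)); // null
            System.out.println(readOptional(in)); // end
        }
    }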
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSort.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSort.java
index 744491b30f702..fd5f493ae405e 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSort.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSort.java
@@ -9,6 +9,9 @@
 
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.common.TriFunction;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.compute.data.Block;
 import org.elasticsearch.compute.data.BlockFactory;
 import org.elasticsearch.compute.data.BooleanBlock;
@@ -33,13 +36,16 @@
 import org.elasticsearch.xpack.esql.core.tree.NodeInfo;
 import org.elasticsearch.xpack.esql.core.tree.Source;
 import org.elasticsearch.xpack.esql.core.type.DataType;
+import org.elasticsearch.xpack.esql.core.util.PlanStreamOutput;
 import org.elasticsearch.xpack.esql.expression.function.Example;
 import org.elasticsearch.xpack.esql.expression.function.FunctionInfo;
 import org.elasticsearch.xpack.esql.expression.function.Param;
 import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction;
+import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput;
 import org.elasticsearch.xpack.esql.planner.PlannerUtils;
 import org.elasticsearch.xpack.esql.type.EsqlDataTypes;
 
+import java.io.IOException;
 import java.util.Arrays;
 import java.util.List;
 import java.util.function.Function;
@@ -54,6 +60,8 @@
  * Sorts a multivalued field in lexicographical order.
  */
 public class MvSort extends EsqlScalarFunction implements OptionalArgument, Validatable {
+    public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "MvSort", MvSort::new);
+
     private final Expression field, order;
 
     private static final Literal ASC = new Literal(Source.EMPTY, "ASC", DataType.KEYWORD);
@@ -79,7 +87,37 @@ public MvSort(
     ) {
         super(source, order == null ? Arrays.asList(field, ASC) : Arrays.asList(field, order));
         this.field = field;
-        this.order = order == null ? ASC : order;
+        this.order = order;
     }
 
+    private MvSort(StreamInput in) throws IOException {
+        this(
+            Source.readFrom((PlanStreamInput) in),
+            ((PlanStreamInput) in).readExpression(),
+            // TODO readOptionalNamedWriteable
+            in.readOptionalWriteable(i -> ((PlanStreamInput) i).readExpression())
+        );
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        source().writeTo(out);
+        ((PlanStreamOutput) out).writeExpression(field);
+        // TODO writeOptionalNamedWriteable
+        out.writeOptionalWriteable(order == null ? null : o -> ((PlanStreamOutput) o).writeExpression(order));
+    }
+
+    @Override
+    public String getWriteableName() {
+        return ENTRY.name;
+    }
+
+    Expression field() {
+        return field;
+    }
+
+    Expression order() {
+        return order;
+    }
 
     @Override
@@ -93,6 +131,9 @@ protected TypeResolution resolveType() {
         if (resolution.unresolved()) {
             return resolution;
         }
+        if (order == null) {
+            return resolution;
+        }
         return isString(order, sourceText(), SECOND);
     }
 
@@ -106,7 +147,10 @@ public boolean foldable()
     public EvalOperator.ExpressionEvaluator.Factory toEvaluator(
         Function toEvaluator
     ) {
-        boolean ordering = order.foldable() && ((BytesRef) order.fold()).utf8ToString().equalsIgnoreCase("DESC") ? false : true;
+        Expression nonNullOrder = order == null ? ASC : order;
+        boolean ordering = nonNullOrder.foldable() && ((BytesRef) nonNullOrder.fold()).utf8ToString().equalsIgnoreCase("DESC")
+            ? false
+            : true;
         return switch (PlannerUtils.toElementType(field.dataType())) {
             case BOOLEAN -> new MvSort.EvaluatorFactory(
                 toEvaluator.apply(field),
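Note the knock-on changes in MvSort: because the constructor no longer eagerly replaces a null order with ASC, the null must be tolerated everywhere order is touched, and the default is applied only at evaluation time via nonNullOrder. Keeping the field null is what lets writeTo record "no order was supplied" instead of serializing the default. A compact sketch of that late-defaulting logic, under the simplifying assumption that order folds to a plain string:

    // Toy version of the ordering decision in MvSort.toEvaluator(): the
    // default is applied at the last moment, so a null field still
    // round-trips as null through serialization.
    public class ToySortOrder {
        static boolean ascending(String order) {
            String effective = order == null ? "ASC" : order; // late default
            return effective.equalsIgnoreCase("DESC") == false;
        }

        public static void main(String[] args) {
            System.out.println(ascending(null));   // true: default ASC
            System.out.println(ascending("desc")); // false: case-insensitive
        }
    }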
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSum.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSum.java
index e14bc401a058a..eabf5e20ad1b0 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSum.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSum.java
@@ -7,6 +7,8 @@
 
 package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue;
 
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.compute.ann.MvEvaluator;
 import org.elasticsearch.compute.operator.EvalOperator;
 import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator;
@@ -21,6 +23,7 @@
 import org.elasticsearch.xpack.esql.expression.function.Param;
 import org.elasticsearch.xpack.esql.planner.PlannerUtils;
 
+import java.io.IOException;
 import java.util.List;
 
 import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isType;
@@ -31,6 +34,8 @@
  * Reduce a multivalued field to a single valued field containing the sum of all values.
 */
 public class MvSum extends AbstractMultivalueFunction {
+    public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "MvSum", MvSum::new);
+
     @FunctionInfo(
         returnType = { "double", "integer", "long", "unsigned_long" },
         description = "Converts a multivalued field into a single valued field containing the sum of all of the values.",
@@ -47,6 +52,15 @@ public MvSum(
         super(source, field);
     }
 
+    private MvSum(StreamInput in) throws IOException {
+        super(in);
+    }
+
+    @Override
+    public String getWriteableName() {
+        return ENTRY.name;
+    }
+
     @Override
     protected TypeResolution resolveFieldType() {
         return isType(field(), t -> t.isNumeric() && isRepresentable(t), sourceText(), null, "numeric");
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvZip.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvZip.java
index 4f42858cbedba..15bd09a4089e6 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvZip.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvZip.java
@@ -9,6 +9,9 @@
 
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.compute.ann.Evaluator;
 import org.elasticsearch.compute.data.BytesRefBlock;
 import org.elasticsearch.compute.operator.EvalOperator;
@@ -19,12 +22,15 @@
 import org.elasticsearch.xpack.esql.core.tree.NodeInfo;
 import org.elasticsearch.xpack.esql.core.tree.Source;
 import org.elasticsearch.xpack.esql.core.type.DataType;
+import org.elasticsearch.xpack.esql.core.util.PlanStreamOutput;
 import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper;
 import org.elasticsearch.xpack.esql.expression.function.Example;
 import org.elasticsearch.xpack.esql.expression.function.FunctionInfo;
 import org.elasticsearch.xpack.esql.expression.function.Param;
 import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction;
+import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput;
 
+import java.io.IOException;
 import java.util.Arrays;
 import java.util.List;
 import java.util.function.Function;
@@ -38,6 +44,8 @@
  * Combines the values from two multivalued fields with a delimiter that joins them together.
 */
 public class MvZip extends EsqlScalarFunction implements OptionalArgument, EvaluatorMapper {
+    public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "MvZip", MvZip::new);
+
     private final Expression mvLeft, mvRight, delim;
 
     private static final Literal COMMA = new Literal(Source.EMPTY, ",", DataType.TEXT);
@@ -60,7 +68,31 @@ public MvZip(
         super(source, delim == null ? Arrays.asList(mvLeft, mvRight, COMMA) : Arrays.asList(mvLeft, mvRight, delim));
         this.mvLeft = mvLeft;
         this.mvRight = mvRight;
-        this.delim = delim == null ? COMMA : delim;
+        this.delim = delim;
     }
 
+    private MvZip(StreamInput in) throws IOException {
+        this(
+            Source.readFrom((PlanStreamInput) in),
+            ((PlanStreamInput) in).readExpression(),
+            ((PlanStreamInput) in).readExpression(),
+            // TODO readOptionalNamedWriteable
+            in.readOptionalWriteable(i -> ((PlanStreamInput) i).readExpression())
+        );
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        Source.EMPTY.writeTo(out);
+        ((PlanStreamOutput) out).writeExpression(mvLeft);
+        ((PlanStreamOutput) out).writeExpression(mvRight);
+        // TODO writeOptionalNamedWriteable
+        out.writeOptionalWriteable(delim == null ? null : o -> ((PlanStreamOutput) o).writeExpression(delim));
+    }
+
+    @Override
+    public String getWriteableName() {
+        return ENTRY.name;
+    }
 
     @Override
@@ -104,7 +136,12 @@ public Nullability nullable()
     public EvalOperator.ExpressionEvaluator.Factory toEvaluator(
         Function toEvaluator
     ) {
-        return new MvZipEvaluator.Factory(source(), toEvaluator.apply(mvLeft), toEvaluator.apply(mvRight), toEvaluator.apply(delim));
+        return new MvZipEvaluator.Factory(
+            source(),
+            toEvaluator.apply(mvLeft),
+            toEvaluator.apply(mvRight),
+            toEvaluator.apply(delim == null ? COMMA : delim)
+        );
     }
 
     @Override
@@ -195,4 +232,16 @@ static void process(BytesRefBlock.Builder builder, int position, BytesRefBlock l
         }
         builder.endPositionEntry();
     }
+
+    Expression mvLeft() {
+        return mvLeft;
+    }
+
+    Expression mvRight() {
+        return mvRight;
+    }
+
+    Expression delim() {
+        return delim;
+    }
 }
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/EsqlArithmeticOperation.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/EsqlArithmeticOperation.java
index 89931d7a6f4d1..7ab6d96181f53 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/EsqlArithmeticOperation.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/EsqlArithmeticOperation.java
@@ -20,7 +20,6 @@
 import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper;
 import org.elasticsearch.xpack.esql.expression.function.scalar.math.Cast;
 import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput;
-import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput;
 import org.elasticsearch.xpack.esql.type.EsqlDataTypeRegistry;
 
 import java.io.IOException;
@@ -127,13 +126,6 @@ public interface BinaryEvaluator {
         );
     }
 
-    @Override
-    public void writeTo(StreamOutput out) throws IOException {
-        source().writeTo(out);
-        ((PlanStreamOutput) out).writeExpression(left());
-        ((PlanStreamOutput) out).writeExpression(right());
-    }
-
     @Override
     public Object fold() {
         return EvaluatorMapper.super.fold();
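The EsqlArithmeticOperation hunk above is pure deletion: its writeTo was identical to the one now provided by BinaryScalarFunction, so lifting the implementation into the shared base class lets subclasses drop their copies (InsensitiveBinaryComparison gets the same treatment next). A toy sketch of the shape of that refactor (illustrative names only):

    // Before: each subclass carried an identical writeTo. After: the
    // abstract base owns it, and subclasses keep only behavior that differs.
    abstract class ToyBase {
        final String left;
        final String right;

        ToyBase(String left, String right) {
            this.left = left;
            this.right = right;
        }

        // Shared serialization, formerly duplicated in every subclass.
        String writeTo() {
            return left + "," + right;
        }
    }

    final class ToyArithmetic extends ToyBase { // no writeTo override needed
        ToyArithmetic(String left, String right) {
            super(left, right);
        }
    }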
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/InsensitiveBinaryComparison.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/InsensitiveBinaryComparison.java
index 137723de24edd..1ce87094e50b3 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/InsensitiveBinaryComparison.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/InsensitiveBinaryComparison.java
@@ -7,13 +7,10 @@
 package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison;
 
 import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.xpack.esql.core.expression.Expression;
 import org.elasticsearch.xpack.esql.core.expression.function.scalar.BinaryScalarFunction;
 import org.elasticsearch.xpack.esql.core.tree.Source;
 import org.elasticsearch.xpack.esql.core.type.DataType;
-import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput;
-import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput;
 
 import java.io.IOException;
 
@@ -24,14 +21,7 @@ protected InsensitiveBinaryComparison(Source source, Expression left, Expression
     }
 
     protected InsensitiveBinaryComparison(StreamInput in) throws IOException {
-        this(Source.readFrom((PlanStreamInput) in), ((PlanStreamInput) in).readExpression(), ((PlanStreamInput) in).readExpression());
-    }
-
-    @Override
-    public void writeTo(StreamOutput out) throws IOException {
-        source().writeTo(out);
-        ((PlanStreamOutput) out).writeExpression(left());
-        ((PlanStreamOutput) out).writeExpression(right());
+        super(in);
     }
 
     @Override
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java
index 59cbfca89112f..be5e105c3398e 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java
@@ -81,20 +81,6 @@
 import org.elasticsearch.xpack.esql.expression.function.scalar.math.Round;
 import org.elasticsearch.xpack.esql.expression.function.scalar.math.Tau;
 import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.AbstractMultivalueFunction;
-import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvAppend;
-import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvAvg;
-import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvConcat;
-import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvCount;
-import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvDedupe;
-import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvFirst;
-import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvLast;
-import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMax;
-import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMedian;
-import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMin;
-import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvSlice;
-import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvSort;
-import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvSum;
-import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvZip;
 import org.elasticsearch.xpack.esql.expression.function.scalar.nulls.Coalesce;
 import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialContains;
 import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialDisjoint;
@@ -312,27 +298,13 @@
         of(AggregateFunction.class, Percentile.class, PlanNamedTypes::writePercentile, PlanNamedTypes::readPercentile),
         of(AggregateFunction.class, SpatialCentroid.class, PlanNamedTypes::writeAggFunction, PlanNamedTypes::readAggFunction),
         of(AggregateFunction.class, Sum.class, PlanNamedTypes::writeAggFunction, PlanNamedTypes::readAggFunction),
-        of(AggregateFunction.class, Values.class, PlanNamedTypes::writeAggFunction, PlanNamedTypes::readAggFunction),
-        // Multivalue functions
-        of(ScalarFunction.class, MvAppend.class, PlanNamedTypes::writeMvAppend, PlanNamedTypes::readMvAppend),
-        of(ScalarFunction.class, MvAvg.class, PlanNamedTypes::writeMvFunction, PlanNamedTypes::readMvFunction),
-        of(ScalarFunction.class, MvCount.class, PlanNamedTypes::writeMvFunction, PlanNamedTypes::readMvFunction),
-        of(ScalarFunction.class, MvConcat.class, PlanNamedTypes::writeMvConcat, PlanNamedTypes::readMvConcat),
-        of(ScalarFunction.class, MvDedupe.class, PlanNamedTypes::writeMvFunction, PlanNamedTypes::readMvFunction),
-        of(ScalarFunction.class, MvFirst.class, PlanNamedTypes::writeMvFunction, PlanNamedTypes::readMvFunction),
-        of(ScalarFunction.class, MvLast.class, PlanNamedTypes::writeMvFunction, PlanNamedTypes::readMvFunction),
-        of(ScalarFunction.class, MvMax.class, PlanNamedTypes::writeMvFunction, PlanNamedTypes::readMvFunction),
-        of(ScalarFunction.class, MvMedian.class, PlanNamedTypes::writeMvFunction, PlanNamedTypes::readMvFunction),
-        of(ScalarFunction.class, MvMin.class, PlanNamedTypes::writeMvFunction, PlanNamedTypes::readMvFunction),
-        of(ScalarFunction.class, MvSort.class, PlanNamedTypes::writeMvSort, PlanNamedTypes::readMvSort),
-        of(ScalarFunction.class, MvSlice.class, PlanNamedTypes::writeMvSlice, PlanNamedTypes::readMvSlice),
-        of(ScalarFunction.class, MvSum.class, PlanNamedTypes::writeMvFunction, PlanNamedTypes::readMvFunction),
-        of(ScalarFunction.class, MvZip.class, PlanNamedTypes::writeMvZip, PlanNamedTypes::readMvZip)
+        of(AggregateFunction.class, Values.class, PlanNamedTypes::writeAggFunction, PlanNamedTypes::readAggFunction)
     );
     List entries = new ArrayList<>(declared);
 
     // From NamedWriteables
     for (List ee : List.of(
+        AbstractMultivalueFunction.getNamedWriteables(),
         EsqlArithmeticOperation.getNamedWriteables(),
         EsqlBinaryComparison.getNamedWriteables(),
         FullTextPredicate.getNamedWriteables(),
@@ -1429,38 +1401,6 @@ static void writeAggFunction(PlanStreamOutput out, AggregateFunction aggregateFu
         out.writeExpression(aggregateFunction.field());
     }
 
-    // -- Multivalue functions
-    static final Map> MV_CTRS = Map.ofEntries(
-        entry(name(MvAvg.class), MvAvg::new),
-        entry(name(MvCount.class), MvCount::new),
-        entry(name(MvDedupe.class), MvDedupe::new),
-        entry(name(MvFirst.class), MvFirst::new),
-        entry(name(MvLast.class), MvLast::new),
-        entry(name(MvMax.class), MvMax::new),
-        entry(name(MvMedian.class), MvMedian::new),
-        entry(name(MvMin.class), MvMin::new),
-        entry(name(MvSum.class), MvSum::new)
-    );
-
-    static AbstractMultivalueFunction readMvFunction(PlanStreamInput in, String name) throws IOException {
-        return MV_CTRS.get(name).apply(Source.readFrom(in), in.readExpression());
-    }
-
-    static void writeMvFunction(PlanStreamOutput out, AbstractMultivalueFunction fn) throws IOException {
-        Source.EMPTY.writeTo(out);
-        out.writeExpression(fn.field());
-    }
-
-    static MvConcat readMvConcat(PlanStreamInput in) throws IOException {
-        return new MvConcat(Source.readFrom(in), in.readExpression(), in.readExpression());
-    }
-
-    static void writeMvConcat(PlanStreamOutput out, MvConcat fn) throws IOException {
-        Source.EMPTY.writeTo(out);
-        out.writeExpression(fn.left());
-        out.writeExpression(fn.right());
-    }
-
     // -- ancillary supporting classes of plan nodes, etc
 
     static EsQueryExec.FieldSort readFieldSort(PlanStreamInput in) throws IOException {
@@ -1514,54 +1454,4 @@ static void writeLog(PlanStreamOutput out, Log log) throws IOException {
         out.writeExpression(fields.get(0));
         out.writeOptionalWriteable(fields.size() == 2 ? o -> out.writeExpression(fields.get(1)) : null);
     }
-
-    static MvSort readMvSort(PlanStreamInput in) throws IOException {
-        return new MvSort(Source.readFrom(in), in.readExpression(), in.readOptionalNamed(Expression.class));
-    }
-
-    static void writeMvSort(PlanStreamOutput out, MvSort mvSort) throws IOException {
-        mvSort.source().writeTo(out);
-        List fields = mvSort.children();
-        assert fields.size() == 1 || fields.size() == 2;
-        out.writeExpression(fields.get(0));
-        out.writeOptionalWriteable(fields.size() == 2 ? o -> out.writeExpression(fields.get(1)) : null);
-    }
-
-    static MvSlice readMvSlice(PlanStreamInput in) throws IOException {
-        return new MvSlice(Source.readFrom(in), in.readExpression(), in.readExpression(), in.readOptionalNamed(Expression.class));
-    }
-
-    static void writeMvSlice(PlanStreamOutput out, MvSlice fn) throws IOException {
-        Source.EMPTY.writeTo(out);
-        List fields = fn.children();
-        assert fields.size() == 2 || fields.size() == 3;
-        out.writeExpression(fields.get(0));
-        out.writeExpression(fields.get(1));
-        out.writeOptionalWriteable(fields.size() == 3 ? o -> out.writeExpression(fields.get(2)) : null);
-    }
-
-    static MvZip readMvZip(PlanStreamInput in) throws IOException {
-        return new MvZip(Source.readFrom(in), in.readExpression(), in.readExpression(), in.readOptionalNamed(Expression.class));
-    }
-
-    static void writeMvZip(PlanStreamOutput out, MvZip fn) throws IOException {
-        Source.EMPTY.writeTo(out);
-        List fields = fn.children();
-        assert fields.size() == 2 || fields.size() == 3;
-        out.writeExpression(fields.get(0));
-        out.writeExpression(fields.get(1));
-        out.writeOptionalWriteable(fields.size() == 3 ? o -> out.writeExpression(fields.get(2)) : null);
-    }
-
-    static MvAppend readMvAppend(PlanStreamInput in) throws IOException {
-        return new MvAppend(Source.readFrom(in), in.readExpression(), in.readExpression());
-    }
-
-    static void writeMvAppend(PlanStreamOutput out, MvAppend fn) throws IOException {
-        Source.EMPTY.writeTo(out);
-        List fields = fn.children();
-        assert fields.size() == 2;
-        out.writeExpression(fields.get(0));
-        out.writeExpression(fields.get(1));
-    }
 }
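The PlanNamedTypes hunks complete the migration: instead of a hand-maintained table of reader/writer pairs per class, the plan-serialization registry is assembled from the per-family getNamedWriteables() lists, so adding a function now means adding one ENTRY rather than editing this file. A self-contained sketch of that assembly step (illustrative names; the accumulation inside the loop is assumed, as it is not shown in the hunk):

    import java.util.ArrayList;
    import java.util.List;

    // Toy model of the new registry assembly in namedTypeEntries(): a small
    // hand-written "declared" list plus lists contributed by the families.
    public class ToyPlanNames {
        static List<String> namedTypeEntries() {
            List<String> declared = List.of("Percentile", "SpatialCentroid", "Sum", "Values");
            List<String> entries = new ArrayList<>(declared);
            // Stand-ins for AbstractMultivalueFunction.getNamedWriteables(),
            // EsqlArithmeticOperation.getNamedWriteables(), and so on.
            for (List<String> family : List.of(
                List.of("MvAppend", "MvAvg", "MvSum"),
                List.of("Add", "Sub")
            )) {
                entries.addAll(family); // assumed accumulation step
            }
            return entries;
        }

        public static void main(String[] args) {
            System.out.println(namedTypeEntries());
        }
    }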
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AbstractExpressionSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AbstractExpressionSerializationTests.java
index 5a794c3ff7730..9b33af9f0a2e0 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AbstractExpressionSerializationTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AbstractExpressionSerializationTests.java
@@ -22,6 +22,7 @@
 import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput;
 import org.elasticsearch.xpack.esql.session.EsqlConfiguration;
 import org.elasticsearch.xpack.esql.session.EsqlConfigurationSerializationTests;
+import org.junit.Before;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -32,6 +33,12 @@
 import static org.hamcrest.Matchers.sameInstance;
 
 public abstract class AbstractExpressionSerializationTests extends AbstractWireTestCase {
+    /**
+     * We use a single random config for all serialization because it's pretty
+     * heavy to build, especially in {@link #testConcurrentSerialization()}.
+     */
+    private EsqlConfiguration config;
+
     public static Source randomSource() {
         int lineNumber = between(0, EXAMPLE_QUERY.length - 1);
         int offset = between(0, EXAMPLE_QUERY[lineNumber].length() - 2);
@@ -46,7 +53,6 @@ public static Expression randomChild() {
 
     @Override
     protected final T copyInstance(T instance, TransportVersion version) throws IOException {
-        EsqlConfiguration config = EsqlConfigurationSerializationTests.randomConfiguration(String.join("\n", EXAMPLE_QUERY), Map.of());
         return copyInstance(
             instance,
             getNamedWriteableRegistry(),
@@ -91,4 +97,9 @@ protected final NamedWriteableRegistry getNamedWriteableRegistry() {
         "I understand equations, both the simple and quadratical,",
         "About binomial theorem I'm teeming with a lot o' news,",
         "With many cheerful facts about the square of the hypotenuse." };
+
+    @Before
+    public void initConfig() {
+        config = EsqlConfigurationSerializationTests.randomConfiguration(String.join("\n", EXAMPLE_QUERY), Map.of());
+    }
 }
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMvSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMvSerializationTests.java
new file mode 100644
index 0000000000000..fba33c9ea1c03
--- /dev/null
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMvSerializationTests.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue;
+
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.xpack.esql.core.expression.Expression;
+import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests;
+
+import java.util.List;
+
+public abstract class AbstractMvSerializationTests extends AbstractExpressionSerializationTests {
+    @Override
+    protected List getNamedWriteables() {
+        return AbstractMultivalueFunction.getNamedWriteables();
+    }
+}
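The test scaffolding above and the per-function tests that follow all hang off the same round-trip contract: createTestInstance builds a random instance, copyInstance pushes it through the wire format, and mutateInstance must produce a not-equal variant so equals/hashCode regressions are caught. A self-contained toy version of that contract (plain Java, not the AbstractWireTestCase API):

    import java.util.Objects;
    import java.util.Random;

    // Toy model of the createTestInstance/mutateInstance contract used by
    // the new *SerializationTests classes. Illustrative only.
    public class ToyRoundTrip {
        static final Random RANDOM = new Random();

        record Instance(String field) {}

        static Instance createTestInstance() {
            return new Instance("field" + RANDOM.nextInt(10));
        }

        // randomValueOtherThan-style loop: re-roll until the result differs,
        // so mutateInstance never returns an equal instance.
        static Instance mutateInstance(Instance instance) {
            Instance mutated;
            do {
                mutated = createTestInstance();
            } while (Objects.equals(mutated, instance));
            return mutated;
        }

        public static void main(String[] args) {
            Instance original = createTestInstance();
            System.out.println(original.equals(mutateInstance(original))); // false
        }
    }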
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; + +import java.io.IOException; + +public class MvAppendSerializationTests extends AbstractMvSerializationTests { + @Override + protected MvAppend createTestInstance() { + Source source = randomSource(); + Expression field1 = randomChild(); + Expression field2 = randomChild(); + return new MvAppend(source, field1, field2); + } + + @Override + protected MvAppend mutateInstance(MvAppend instance) throws IOException { + Source source = randomSource(); + Expression field1 = randomChild(); + Expression field2 = randomChild(); + if (randomBoolean()) { + field1 = randomValueOtherThan(field1, AbstractExpressionSerializationTests::randomChild); + } else { + field2 = randomValueOtherThan(field2, AbstractExpressionSerializationTests::randomChild); + } + return new MvAppend(source, field1, field2); + } + + @Override + protected boolean alwaysEmptySource() { + return true; + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgSerializationTests.java new file mode 100644 index 0000000000000..f70702b001492 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgSerializationTests.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; + +import java.io.IOException; + +public class MvAvgSerializationTests extends AbstractMvSerializationTests { + @Override + protected MvAvg createTestInstance() { + return new MvAvg(randomSource(), randomChild()); + } + + @Override + protected MvAvg mutateInstance(MvAvg instance) throws IOException { + return new MvAvg(instance.source(), randomValueOtherThan(instance.field(), AbstractExpressionSerializationTests::randomChild)); + } + + @Override + protected boolean alwaysEmptySource() { + return true; + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvConcatSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvConcatSerializationTests.java new file mode 100644 index 0000000000000..9f2aba8d9d9ca --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvConcatSerializationTests.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; + +import java.io.IOException; + +public class MvConcatSerializationTests extends AbstractMvSerializationTests { + @Override + protected MvConcat createTestInstance() { + Source source = randomSource(); + Expression left = randomChild(); + Expression right = randomChild(); + return new MvConcat(source, left, right); + } + + @Override + protected MvConcat mutateInstance(MvConcat instance) throws IOException { + Source source = instance.source(); + Expression left = instance.left(); + Expression right = instance.right(); + if (randomBoolean()) { + left = randomValueOtherThan(left, AbstractExpressionSerializationTests::randomChild); + } else { + right = randomValueOtherThan(right, AbstractExpressionSerializationTests::randomChild); + } + return new MvConcat(source, left, right); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvCountSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvCountSerializationTests.java new file mode 100644 index 0000000000000..a0d28a6cf925b --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvCountSerializationTests.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; + +import java.io.IOException; + +public class MvCountSerializationTests extends AbstractMvSerializationTests { + @Override + protected MvCount createTestInstance() { + return new MvCount(randomSource(), randomChild()); + } + + @Override + protected MvCount mutateInstance(MvCount instance) throws IOException { + return new MvCount(instance.source(), randomValueOtherThan(instance.field(), AbstractExpressionSerializationTests::randomChild)); + } + + @Override + protected boolean alwaysEmptySource() { + return true; + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvDedupeSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvDedupeSerializationTests.java new file mode 100644 index 0000000000000..afb2ec90e1e3e --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvDedupeSerializationTests.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; + +import java.io.IOException; + +public class MvDedupeSerializationTests extends AbstractMvSerializationTests { + @Override + protected MvDedupe createTestInstance() { + return new MvDedupe(randomSource(), randomChild()); + } + + @Override + protected MvDedupe mutateInstance(MvDedupe instance) throws IOException { + return new MvDedupe(instance.source(), randomValueOtherThan(instance.field(), AbstractExpressionSerializationTests::randomChild)); + } + + @Override + protected boolean alwaysEmptySource() { + return true; + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirstSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirstSerializationTests.java new file mode 100644 index 0000000000000..dbb49bb96a663 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirstSerializationTests.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; + +import java.io.IOException; + +public class MvFirstSerializationTests extends AbstractMvSerializationTests { + @Override + protected MvFirst createTestInstance() { + return new MvFirst(randomSource(), randomChild()); + } + + @Override + protected MvFirst mutateInstance(MvFirst instance) throws IOException { + return new MvFirst(instance.source(), randomValueOtherThan(instance.field(), AbstractExpressionSerializationTests::randomChild)); + } + + @Override + protected boolean alwaysEmptySource() { + return true; + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLastSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLastSerializationTests.java new file mode 100644 index 0000000000000..190eb0263c162 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLastSerializationTests.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; + +import java.io.IOException; + +public class MvLastSerializationTests extends AbstractMvSerializationTests { + @Override + protected MvLast createTestInstance() { + return new MvLast(randomSource(), randomChild()); + } + + @Override + protected MvLast mutateInstance(MvLast instance) throws IOException { + return new MvLast(instance.source(), randomValueOtherThan(instance.field(), AbstractExpressionSerializationTests::randomChild)); + } + + @Override + protected boolean alwaysEmptySource() { + return true; + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxSerializationTests.java new file mode 100644 index 0000000000000..ffc51af5f103d --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxSerializationTests.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; + +import java.io.IOException; + +public class MvMaxSerializationTests extends AbstractMvSerializationTests { + @Override + protected MvMax createTestInstance() { + return new MvMax(randomSource(), randomChild()); + } + + @Override + protected MvMax mutateInstance(MvMax instance) throws IOException { + return new MvMax(instance.source(), randomValueOtherThan(instance.field(), AbstractExpressionSerializationTests::randomChild)); + } + + @Override + protected boolean alwaysEmptySource() { + return true; + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianSerializationTests.java new file mode 100644 index 0000000000000..067cc6430ce01 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianSerializationTests.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; + +import java.io.IOException; + +public class MvMedianSerializationTests extends AbstractMvSerializationTests { + @Override + protected MvMedian createTestInstance() { + return new MvMedian(randomSource(), randomChild()); + } + + @Override + protected MvMedian mutateInstance(MvMedian instance) throws IOException { + return new MvMedian(instance.source(), randomValueOtherThan(instance.field(), AbstractExpressionSerializationTests::randomChild)); + } + + @Override + protected boolean alwaysEmptySource() { + return true; + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinSerializationTests.java new file mode 100644 index 0000000000000..1f38587274353 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinSerializationTests.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; + +import java.io.IOException; + +public class MvMinSerializationTests extends AbstractMvSerializationTests { + @Override + protected MvMin createTestInstance() { + return new MvMin(randomSource(), randomChild()); + } + + @Override + protected MvMin mutateInstance(MvMin instance) throws IOException { + return new MvMin(instance.source(), randomValueOtherThan(instance.field(), AbstractExpressionSerializationTests::randomChild)); + } + + @Override + protected boolean alwaysEmptySource() { + return true; + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceSerializationTests.java new file mode 100644 index 0000000000000..64209ce0f4644 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceSerializationTests.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; + +import java.io.IOException; + +public class MvSliceSerializationTests extends AbstractMvSerializationTests { + @Override + protected MvSlice createTestInstance() { + Source source = randomSource(); + Expression field = randomChild(); + Expression start = randomChild(); + Expression end = randomBoolean() ? 
null : randomChild(); + return new MvSlice(source, field, start, end); + } + + @Override + protected MvSlice mutateInstance(MvSlice instance) throws IOException { + Source source = instance.source(); + Expression field = instance.field(); + Expression start = instance.start(); + Expression end = instance.end(); + switch (between(0, 2)) { + case 0 -> field = randomValueOtherThan(field, AbstractExpressionSerializationTests::randomChild); + case 1 -> start = randomValueOtherThan(start, AbstractExpressionSerializationTests::randomChild); + case 2 -> end = randomValueOtherThan(end, () -> randomBoolean() ? null : randomChild()); + } + return new MvSlice(source, field, start, end); + } + + @Override + protected boolean alwaysEmptySource() { + return true; + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSortSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSortSerializationTests.java new file mode 100644 index 0000000000000..1728ad6f09357 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSortSerializationTests.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; + +import java.io.IOException; + +public class MvSortSerializationTests extends AbstractMvSerializationTests { + @Override + protected MvSort createTestInstance() { + Source source = randomSource(); + Expression field = randomChild(); + Expression order = randomBoolean() ? null : randomChild(); + return new MvSort(source, field, order); + } + + @Override + protected MvSort mutateInstance(MvSort instance) throws IOException { + Source source = instance.source(); + Expression field = instance.field(); + Expression order = instance.order(); + if (randomBoolean()) { + field = randomValueOtherThan(field, AbstractExpressionSerializationTests::randomChild); + } else { + order = randomValueOtherThan(order, () -> randomBoolean() ? null : randomChild()); + } + return new MvSort(source, field, order); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSumSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSumSerializationTests.java new file mode 100644 index 0000000000000..e8ddcc9340b45 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSumSerializationTests.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; + +import java.io.IOException; + +public class MvSumSerializationTests extends AbstractMvSerializationTests { + @Override + protected MvSum createTestInstance() { + return new MvSum(randomSource(), randomChild()); + } + + @Override + protected MvSum mutateInstance(MvSum instance) throws IOException { + return new MvSum(instance.source(), randomValueOtherThan(instance.field(), AbstractExpressionSerializationTests::randomChild)); + } + + @Override + protected boolean alwaysEmptySource() { + return true; + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvZipSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvZipSerializationTests.java new file mode 100644 index 0000000000000..d16ca02627b29 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvZipSerializationTests.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; + +import java.io.IOException; + +public class MvZipSerializationTests extends AbstractMvSerializationTests { + @Override + protected MvZip createTestInstance() { + Source source = randomSource(); + Expression mvLeft = randomChild(); + Expression mvRight = randomChild(); + Expression delim = randomBoolean() ? null : randomChild(); + return new MvZip(source, mvLeft, mvRight, delim); + } + + @Override + protected MvZip mutateInstance(MvZip instance) throws IOException { + Source source = instance.source(); + Expression mvLeft = instance.mvLeft(); + Expression mvRight = instance.mvRight(); + Expression delim = instance.delim(); + switch (between(0, 2)) { + case 0 -> mvLeft = randomValueOtherThan(mvLeft, AbstractExpressionSerializationTests::randomChild); + case 1 -> mvRight = randomValueOtherThan(mvRight, AbstractExpressionSerializationTests::randomChild); + case 2 -> delim = randomValueOtherThan(delim, () -> randomBoolean() ? null : randomChild()); + } + return new MvZip(source, mvLeft, mvRight, delim); + } + + @Override + protected boolean alwaysEmptySource() { + return true; + } +} From 5ef4e81c7ecccf4ee58dc2dbccb07c2a978b0562 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Wed, 19 Jun 2024 15:10:46 +0200 Subject: [PATCH 30/44] Add getAndSet to some BigArray implementations and make set return void (#109878) The majority of callsites for these things don't look at the return. Computing the return from bytes to numeric values is not free due to bounds checks etc. 
=> this PR adds a set that doesn't read the previous value and where needed moves the existing code to a new getAndSet call --- .../common/util/AbstractHash.java | 2 +- .../elasticsearch/common/util/BigArrays.java | 28 ++++++++++------- .../common/util/BigByteArray.java | 4 +-- .../common/util/BigDoubleArray.java | 4 +-- .../common/util/BigFloatArray.java | 4 +-- .../common/util/BigIntArray.java | 9 +++++- .../common/util/BigLongArray.java | 10 ++++++- .../elasticsearch/common/util/ByteArray.java | 4 +-- .../common/util/DoubleArray.java | 4 +-- .../elasticsearch/common/util/FloatArray.java | 4 +-- .../elasticsearch/common/util/Int3Hash.java | 6 ++-- .../elasticsearch/common/util/IntArray.java | 7 ++++- .../elasticsearch/common/util/LongArray.java | 7 ++++- .../elasticsearch/common/util/LongHash.java | 2 +- .../common/util/LongLongHash.java | 4 +-- .../common/util/ReleasableByteArray.java | 2 +- .../common/util/ReleasableDoubleArray.java | 2 +- .../common/util/ReleasableIntArray.java | 7 ++++- .../common/util/ReleasableLongArray.java | 7 ++++- .../GlobalOrdinalsStringTermsAggregator.java | 2 +- .../common/util/MockBigArrays.java | 30 ++++++++++++------- 21 files changed, 97 insertions(+), 52 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/common/util/AbstractHash.java b/server/src/main/java/org/elasticsearch/common/util/AbstractHash.java index b0dc6d98fe16b..2bcc9b48ff1b8 100644 --- a/server/src/main/java/org/elasticsearch/common/util/AbstractHash.java +++ b/server/src/main/java/org/elasticsearch/common/util/AbstractHash.java @@ -32,7 +32,7 @@ public long id(long index) { } protected final long id(long index, long id) { - return ids.set(index, id + 1) - 1; + return ids.getAndSet(index, id + 1) - 1; } @Override diff --git a/server/src/main/java/org/elasticsearch/common/util/BigArrays.java b/server/src/main/java/org/elasticsearch/common/util/BigArrays.java index 1e8b0cc83eaa6..199eaa83a2da3 100644 --- a/server/src/main/java/org/elasticsearch/common/util/BigArrays.java +++ b/server/src/main/java/org/elasticsearch/common/util/BigArrays.java @@ -118,11 +118,9 @@ public byte get(long index) { } @Override - public byte set(long index, byte value) { + public void set(long index, byte value) { assert indexIsInt(index); - final byte ret = array[(int) index]; array[(int) index] = value; - return ret; } @Override @@ -215,13 +213,19 @@ public int get(long index) { } @Override - public int set(long index, int value) { + public int getAndSet(long index, int value) { assert index >= 0 && index < size(); final int ret = (int) VH_PLATFORM_NATIVE_INT.get(array, (int) index << 2); VH_PLATFORM_NATIVE_INT.set(array, (int) index << 2, value); return ret; } + @Override + public void set(long index, int value) { + assert index >= 0 && index < size(); + VH_PLATFORM_NATIVE_INT.set(array, (int) index << 2, value); + } + @Override public int increment(long index, int inc) { assert index >= 0 && index < size(); @@ -272,13 +276,19 @@ public long get(long index) { } @Override - public long set(long index, long value) { + public long getAndSet(long index, long value) { assert index >= 0 && index < size(); final long ret = (long) VH_PLATFORM_NATIVE_LONG.get(array, (int) index << 3); VH_PLATFORM_NATIVE_LONG.set(array, (int) index << 3, value); return ret; } + @Override + public void set(long index, long value) { + assert index >= 0 && index < size(); + VH_PLATFORM_NATIVE_LONG.set(array, (int) index << 3, value); + } + @Override public long increment(long index, long inc) { assert index >= 0 && index < 
size(); @@ -336,11 +346,9 @@ public double get(long index) { } @Override - public double set(long index, double value) { + public void set(long index, double value) { assert index >= 0 && index < size(); - final double ret = (double) VH_PLATFORM_NATIVE_DOUBLE.get(array, (int) index << 3); VH_PLATFORM_NATIVE_DOUBLE.set(array, (int) index << 3, value); - return ret; } @Override @@ -400,11 +408,9 @@ public float get(long index) { } @Override - public float set(long index, float value) { + public void set(long index, float value) { assert index >= 0 && index < size(); - final float ret = (float) VH_PLATFORM_NATIVE_FLOAT.get(array, (int) index << 2); VH_PLATFORM_NATIVE_FLOAT.set(array, (int) index << 2, value); - return ret; } @Override diff --git a/server/src/main/java/org/elasticsearch/common/util/BigByteArray.java b/server/src/main/java/org/elasticsearch/common/util/BigByteArray.java index 61848769e661d..1e714f122d885 100644 --- a/server/src/main/java/org/elasticsearch/common/util/BigByteArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/BigByteArray.java @@ -47,13 +47,11 @@ public byte get(long index) { } @Override - public byte set(long index, byte value) { + public void set(long index, byte value) { final int pageIndex = pageIndex(index); final int indexInPage = indexInPage(index); final byte[] page = getPageForWriting(pageIndex); - final byte ret = page[indexInPage]; page[indexInPage] = value; - return ret; } @Override diff --git a/server/src/main/java/org/elasticsearch/common/util/BigDoubleArray.java b/server/src/main/java/org/elasticsearch/common/util/BigDoubleArray.java index 27dc454c85adf..3135ebb293070 100644 --- a/server/src/main/java/org/elasticsearch/common/util/BigDoubleArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/BigDoubleArray.java @@ -42,13 +42,11 @@ public double get(long index) { } @Override - public double set(long index, double value) { + public void set(long index, double value) { final int pageIndex = pageIndex(index); final int indexInPage = indexInPage(index); final byte[] page = getPageForWriting(pageIndex); - final double ret = (double) VH_PLATFORM_NATIVE_DOUBLE.get(page, indexInPage << 3); VH_PLATFORM_NATIVE_DOUBLE.set(page, indexInPage << 3, value); - return ret; } @Override diff --git a/server/src/main/java/org/elasticsearch/common/util/BigFloatArray.java b/server/src/main/java/org/elasticsearch/common/util/BigFloatArray.java index 9502950c1d25b..380b2c8e12b34 100644 --- a/server/src/main/java/org/elasticsearch/common/util/BigFloatArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/BigFloatArray.java @@ -30,13 +30,11 @@ final class BigFloatArray extends AbstractBigByteArray implements FloatArray { } @Override - public float set(long index, float value) { + public void set(long index, float value) { final int pageIndex = pageIndex(index); final int indexInPage = indexInPage(index); final byte[] page = getPageForWriting(pageIndex); - final float ret = (float) VH_PLATFORM_NATIVE_FLOAT.get(page, indexInPage << 2); VH_PLATFORM_NATIVE_FLOAT.set(page, indexInPage << 2, value); - return ret; } @Override diff --git a/server/src/main/java/org/elasticsearch/common/util/BigIntArray.java b/server/src/main/java/org/elasticsearch/common/util/BigIntArray.java index 4388cc2308905..9ce9842c337c0 100644 --- a/server/src/main/java/org/elasticsearch/common/util/BigIntArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/BigIntArray.java @@ -46,7 +46,7 @@ public int get(long index) { } @Override - public int 
set(long index, int value) { + public int getAndSet(long index, int value) { final int pageIndex = pageIndex(index); final int indexInPage = indexInPage(index); final byte[] page = getPageForWriting(pageIndex); @@ -55,6 +55,13 @@ public int set(long index, int value) { return ret; } + @Override + public void set(long index, int value) { + final int pageIndex = pageIndex(index); + final int indexInPage = indexInPage(index); + VH_PLATFORM_NATIVE_INT.set(getPageForWriting(pageIndex), indexInPage << 2, value); + } + @Override public int increment(long index, int inc) { final int pageIndex = pageIndex(index); diff --git a/server/src/main/java/org/elasticsearch/common/util/BigLongArray.java b/server/src/main/java/org/elasticsearch/common/util/BigLongArray.java index f0ccea26880c4..7d23e06f87658 100644 --- a/server/src/main/java/org/elasticsearch/common/util/BigLongArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/BigLongArray.java @@ -41,7 +41,7 @@ public long get(long index) { } @Override - public long set(long index, long value) { + public long getAndSet(long index, long value) { final int pageIndex = pageIndex(index); final int indexInPage = indexInPage(index); final byte[] page = getPageForWriting(pageIndex); @@ -50,6 +50,14 @@ public long set(long index, long value) { return ret; } + @Override + public void set(long index, long value) { + final int pageIndex = pageIndex(index); + final int indexInPage = indexInPage(index); + final byte[] page = getPageForWriting(pageIndex); + VH_PLATFORM_NATIVE_LONG.set(page, indexInPage << 3, value); + } + @Override public long increment(long index, long inc) { final int pageIndex = pageIndex(index); diff --git a/server/src/main/java/org/elasticsearch/common/util/ByteArray.java b/server/src/main/java/org/elasticsearch/common/util/ByteArray.java index cb2b10632d08b..2c16e730635f8 100644 --- a/server/src/main/java/org/elasticsearch/common/util/ByteArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/ByteArray.java @@ -32,9 +32,9 @@ static ByteArray readFrom(StreamInput in) throws IOException { byte get(long index); /** - * Set a value at the given index and return the previous value. + * Set a value at the given index. */ - byte set(long index, byte value); + void set(long index, byte value); /** * Get a reference to a slice. diff --git a/server/src/main/java/org/elasticsearch/common/util/DoubleArray.java b/server/src/main/java/org/elasticsearch/common/util/DoubleArray.java index dde1157c905c7..80348d3b2945f 100644 --- a/server/src/main/java/org/elasticsearch/common/util/DoubleArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/DoubleArray.java @@ -28,9 +28,9 @@ static DoubleArray readFrom(StreamInput in) throws IOException { double get(long index); /** - * Set a value at the given index and return the previous value. + * Set a value at the given index. */ - double set(long index, double value); + void set(long index, double value); /** * Increment value at the given index by inc and return the value. diff --git a/server/src/main/java/org/elasticsearch/common/util/FloatArray.java b/server/src/main/java/org/elasticsearch/common/util/FloatArray.java index 33427299fe26c..057f51f45b1f6 100644 --- a/server/src/main/java/org/elasticsearch/common/util/FloatArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/FloatArray.java @@ -19,9 +19,9 @@ public interface FloatArray extends BigArray { float get(long index); /** - * Set a value at the given index and return the previous value. 
+ * Set a value at the given index. */ - float set(long index, float value); + void set(long index, float value); /** * Fill slots between fromIndex inclusive to toIndex exclusive with value. diff --git a/server/src/main/java/org/elasticsearch/common/util/Int3Hash.java b/server/src/main/java/org/elasticsearch/common/util/Int3Hash.java index 051dd31ce8869..d9beea76b371a 100644 --- a/server/src/main/java/org/elasticsearch/common/util/Int3Hash.java +++ b/server/src/main/java/org/elasticsearch/common/util/Int3Hash.java @@ -132,9 +132,9 @@ protected void removeAndAdd(long index) { final long id = id(index, -1); assert id >= 0; long keyOffset = id * 3; - final int key1 = keys.set(keyOffset, 0); - final int key2 = keys.set(keyOffset + 1, 0); - final int key3 = keys.set(keyOffset + 2, 0); + final int key1 = keys.getAndSet(keyOffset, 0); + final int key2 = keys.getAndSet(keyOffset + 1, 0); + final int key3 = keys.getAndSet(keyOffset + 2, 0); reset(key1, key2, key3, id); } diff --git a/server/src/main/java/org/elasticsearch/common/util/IntArray.java b/server/src/main/java/org/elasticsearch/common/util/IntArray.java index 06975ffba46da..4f4dd61863595 100644 --- a/server/src/main/java/org/elasticsearch/common/util/IntArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/IntArray.java @@ -29,7 +29,12 @@ static IntArray readFrom(StreamInput in) throws IOException { /** * Set a value at the given index and return the previous value. */ - int set(long index, int value); + int getAndSet(long index, int value); + + /** + * Set a value at the given index + */ + void set(long index, int value); /** * Increment value at the given index by inc and return the value. diff --git a/server/src/main/java/org/elasticsearch/common/util/LongArray.java b/server/src/main/java/org/elasticsearch/common/util/LongArray.java index 59321d1957f4d..cff8c86eef4b6 100644 --- a/server/src/main/java/org/elasticsearch/common/util/LongArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/LongArray.java @@ -30,7 +30,12 @@ static LongArray readFrom(StreamInput in) throws IOException { /** * Set a value at the given index and return the previous value. */ - long set(long index, long value); + long getAndSet(long index, long value); + + /** + * Set a value at the given index. + */ + void set(long index, long value); /** * Increment value at the given index by inc and return the value. 
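Concretely, the new contract in use (a minimal sketch; the array size and the
values are invented for illustration):

    import org.elasticsearch.common.util.BigArrays;
    import org.elasticsearch.common.util.LongArray;

    class SetVsGetAndSet {
        static long example(BigArrays bigArrays) {
            try (LongArray longs = bigArrays.newLongArray(16)) {
                // Plain write: after this change there is no read of the
                // previous slot and no extra bounds work just to compute a
                // return value that the caller discards.
                longs.set(0, 42L);
                // The few callers that genuinely need the previous value
                // (e.g. the hash removeAndAdd paths) opt in explicitly.
                return longs.getAndSet(0, 0L);
            }
        }
    }
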
diff --git a/server/src/main/java/org/elasticsearch/common/util/LongHash.java b/server/src/main/java/org/elasticsearch/common/util/LongHash.java index 6ca4d9f0986f6..32364f8d2f341 100644 --- a/server/src/main/java/org/elasticsearch/common/util/LongHash.java +++ b/server/src/main/java/org/elasticsearch/common/util/LongHash.java @@ -110,7 +110,7 @@ public long add(long key) { protected void removeAndAdd(long index) { final long id = id(index, -1); assert id >= 0; - final long key = keys.set(id, 0); + final long key = keys.getAndSet(id, 0); reset(key, id); } diff --git a/server/src/main/java/org/elasticsearch/common/util/LongLongHash.java b/server/src/main/java/org/elasticsearch/common/util/LongLongHash.java index 13405d491298c..61dd3b457029c 100644 --- a/server/src/main/java/org/elasticsearch/common/util/LongLongHash.java +++ b/server/src/main/java/org/elasticsearch/common/util/LongLongHash.java @@ -134,8 +134,8 @@ protected void removeAndAdd(long index) { final long id = id(index, -1); assert id >= 0; long keyOffset = id * 2; - final long key1 = keys.set(keyOffset, 0); - final long key2 = keys.set(keyOffset + 1, 0); + final long key1 = keys.getAndSet(keyOffset, 0); + final long key2 = keys.getAndSet(keyOffset + 1, 0); reset(key1, key2, id); } diff --git a/server/src/main/java/org/elasticsearch/common/util/ReleasableByteArray.java b/server/src/main/java/org/elasticsearch/common/util/ReleasableByteArray.java index ce0f5bdfedd40..0eea3443391c1 100644 --- a/server/src/main/java/org/elasticsearch/common/util/ReleasableByteArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/ReleasableByteArray.java @@ -62,7 +62,7 @@ public boolean get(long index, int len, BytesRef ref) { } @Override - public byte set(long index, byte value) { + public void set(long index, byte value) { throw new UnsupportedOperationException(); } diff --git a/server/src/main/java/org/elasticsearch/common/util/ReleasableDoubleArray.java b/server/src/main/java/org/elasticsearch/common/util/ReleasableDoubleArray.java index 61b2f52ee384e..d7279b845f225 100644 --- a/server/src/main/java/org/elasticsearch/common/util/ReleasableDoubleArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/ReleasableDoubleArray.java @@ -44,7 +44,7 @@ public double get(long index) { } @Override - public double set(long index, double value) { + public void set(long index, double value) { throw new UnsupportedOperationException(); } diff --git a/server/src/main/java/org/elasticsearch/common/util/ReleasableIntArray.java b/server/src/main/java/org/elasticsearch/common/util/ReleasableIntArray.java index 2b433f6812a87..9dbc11328974a 100644 --- a/server/src/main/java/org/elasticsearch/common/util/ReleasableIntArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/ReleasableIntArray.java @@ -44,7 +44,12 @@ public int get(long index) { } @Override - public int set(long index, int value) { + public int getAndSet(long index, int value) { + throw new UnsupportedOperationException(); + } + + @Override + public void set(long index, int value) { throw new UnsupportedOperationException(); } diff --git a/server/src/main/java/org/elasticsearch/common/util/ReleasableLongArray.java b/server/src/main/java/org/elasticsearch/common/util/ReleasableLongArray.java index 2980713e2e652..4f36cdc890d78 100644 --- a/server/src/main/java/org/elasticsearch/common/util/ReleasableLongArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/ReleasableLongArray.java @@ -45,7 +45,12 @@ public long get(long index) { } @Override - public long 
set(long index, long value) { + public long getAndSet(long index, long value) { + throw new UnsupportedOperationException(); + } + + @Override + public void set(long index, long value) { throw new UnsupportedOperationException(); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java index acdb24b9109af..26204e1a2530f 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java @@ -380,7 +380,7 @@ private void mapSegmentCountsToGlobalCounts(LongUnaryOperator mapping) throws IO for (long i = 1; i < segmentDocCounts.size(); i++) { // We use set(...) here, because we need to reset the slow to 0. // segmentDocCounts get reused over the segments and otherwise counts would be too high. - long inc = segmentDocCounts.set(i, 0); + long inc = segmentDocCounts.getAndSet(i, 0); if (inc == 0) { continue; } diff --git a/test/framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java b/test/framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java index b1eddf927d3f3..9378b51de78df 100644 --- a/test/framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java +++ b/test/framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java @@ -389,8 +389,8 @@ public byte get(long index) { } @Override - public byte set(long index, byte value) { - return in.set(index, value); + public void set(long index, byte value) { + in.set(index, value); } @Override @@ -469,8 +469,13 @@ public int get(long index) { } @Override - public int set(long index, int value) { - return in.set(index, value); + public int getAndSet(long index, int value) { + return in.getAndSet(index, value); + } + + @Override + public void set(long index, int value) { + in.set(index, value); } @Override @@ -524,8 +529,13 @@ public long get(long index) { } @Override - public long set(long index, long value) { - return in.set(index, value); + public long getAndSet(long index, long value) { + return in.getAndSet(index, value); + } + + @Override + public void set(long index, long value) { + in.set(index, value); } @Override @@ -584,8 +594,8 @@ public float get(long index) { } @Override - public float set(long index, float value) { - return in.set(index, value); + public void set(long index, float value) { + in.set(index, value); } @Override @@ -629,8 +639,8 @@ public double get(long index) { } @Override - public double set(long index, double value) { - return in.set(index, value); + public void set(long index, double value) { + in.set(index, value); } @Override From 227034e64d59c484418a4c9fb4cccb4a5a74fffc Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Wed, 19 Jun 2024 15:19:58 +0200 Subject: [PATCH 31/44] Remove some duplication from BulkRequestParser (#109302) No need to duplicate most of the index-request building 3x. 
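In outline, the consolidated branch behaves as follows (a simplified sketch,
not the verbatim code; buildSharedIndexRequest() is a hypothetical stand-in
for the shared builder chain shown in the diff below):

    if ("index".equals(action) || "create".equals(action)) {
        // One shared chain builds the request; only the create flag differs.
        var indexRequest = buildSharedIndexRequest();
        if ("create".equals(action)) {
            // A "create" action always enforces create semantics.
            indexRequest = indexRequest.create(true);
        } else if (opType != null) {
            // An "index" action honors an explicit op_type, if one was given.
            indexRequest = indexRequest.create("create".equals(opType));
        }
        indexRequestConsumer.accept(indexRequest, type);
    }
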
--- .../action/bulk/BulkRequestParser.java | 69 +++++-------------- 1 file changed, 18 insertions(+), 51 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestParser.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestParser.java index 5dccd1b55f554..75ab08de942dc 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestParser.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestParser.java @@ -341,58 +341,25 @@ public void parse( // we use internalAdd so we don't fork here, this allows us not to copy over the big byte array to small chunks // of index request. - if ("index".equals(action)) { - if (opType == null) { - indexRequestConsumer.accept( - new IndexRequest(index).id(id) - .routing(routing) - .version(version) - .versionType(versionType) - .setPipeline(pipeline) - .setIfSeqNo(ifSeqNo) - .setIfPrimaryTerm(ifPrimaryTerm) - .source(sliceTrimmingCarriageReturn(data, from, nextMarker, xContentType), xContentType) - .setDynamicTemplates(dynamicTemplates) - .setRequireAlias(requireAlias) - .setRequireDataStream(requireDataStream) - .setListExecutedPipelines(listExecutedPipelines), - type - ); - } else { - indexRequestConsumer.accept( - new IndexRequest(index).id(id) - .routing(routing) - .version(version) - .versionType(versionType) - .create("create".equals(opType)) - .setPipeline(pipeline) - .setIfSeqNo(ifSeqNo) - .setIfPrimaryTerm(ifPrimaryTerm) - .source(sliceTrimmingCarriageReturn(data, from, nextMarker, xContentType), xContentType) - .setDynamicTemplates(dynamicTemplates) - .setRequireAlias(requireAlias) - .setRequireDataStream(requireDataStream) - .setListExecutedPipelines(listExecutedPipelines), - type - ); + if ("index".equals(action) || "create".equals(action)) { + var indexRequest = new IndexRequest(index).id(id) + .routing(routing) + .version(version) + .versionType(versionType) + .setPipeline(pipeline) + .setIfSeqNo(ifSeqNo) + .setIfPrimaryTerm(ifPrimaryTerm) + .source(sliceTrimmingCarriageReturn(data, from, nextMarker, xContentType), xContentType) + .setDynamicTemplates(dynamicTemplates) + .setRequireAlias(requireAlias) + .setRequireDataStream(requireDataStream) + .setListExecutedPipelines(listExecutedPipelines); + if ("create".equals(action)) { + indexRequest = indexRequest.create(true); + } else if (opType != null) { + indexRequest = indexRequest.create("create".equals(opType)); } - } else if ("create".equals(action)) { - indexRequestConsumer.accept( - new IndexRequest(index).id(id) - .routing(routing) - .version(version) - .versionType(versionType) - .create(true) - .setPipeline(pipeline) - .setIfSeqNo(ifSeqNo) - .setIfPrimaryTerm(ifPrimaryTerm) - .source(sliceTrimmingCarriageReturn(data, from, nextMarker, xContentType), xContentType) - .setDynamicTemplates(dynamicTemplates) - .setRequireAlias(requireAlias) - .setRequireDataStream(requireDataStream) - .setListExecutedPipelines(listExecutedPipelines), - type - ); + indexRequestConsumer.accept(indexRequest, type); } else if ("update".equals(action)) { if (version != Versions.MATCH_ANY || versionType != VersionType.INTERNAL) { throw new IllegalArgumentException( From 1319af78d226faa560d348a70aca97dc59a5cda1 Mon Sep 17 00:00:00 2001 From: David Turner Date: Wed, 19 Jun 2024 14:46:00 +0100 Subject: [PATCH 32/44] AwaitsFix for #109686 --- .../elasticsearch/action/admin/cluster/node/tasks/TasksIT.java | 1 + 1 file changed, 1 insertion(+) diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java
index 5ea1b869f417e..30be4c86eaeef 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java
@@ -488,6 +488,7 @@ public void onTaskRegistered(Task task) {
         }
     }
 
+    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/109686")
     public void testTasksCancellation() throws Exception {
         // Start blocking test task
         // Get real client (the plugin is not registered on transport nodes)

From b65e02c60177ede51325b607f602f726e14c7339 Mon Sep 17 00:00:00 2001
From: Tim Grein
Date: Wed, 19 Jun 2024 16:02:31 +0200
Subject: [PATCH 33/44] [Inference API] Remove unused constructors (#109863)

---
 .../mistral/embeddings/MistralEmbeddingsModel.java | 13 -------------
 1 file changed, 13 deletions(-)

diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/mistral/embeddings/MistralEmbeddingsModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/mistral/embeddings/MistralEmbeddingsModel.java
index c3d261efea79a..2631dfecccab3 100644
--- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/mistral/embeddings/MistralEmbeddingsModel.java
+++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/mistral/embeddings/MistralEmbeddingsModel.java
@@ -23,7 +23,6 @@
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.Map;
-import java.util.Objects;
 
 import static org.elasticsearch.xpack.inference.services.mistral.MistralConstants.API_EMBEDDINGS_PATH;
 
@@ -51,23 +50,11 @@ public MistralEmbeddingsModel(
         );
     }
 
-    public MistralEmbeddingsModel(MistralEmbeddingsModel model, TaskSettings taskSettings, RateLimitSettings rateLimitSettings) {
-        super(model, taskSettings);
-        this.model = Objects.requireNonNull(model.model);
-        this.rateLimitSettings = Objects.requireNonNull(rateLimitSettings);
-        setEndpointUrl();
-    }
-
     public MistralEmbeddingsModel(MistralEmbeddingsModel model, MistralEmbeddingsServiceSettings serviceSettings) {
         super(model, serviceSettings);
         setPropertiesFromServiceSettings(serviceSettings);
     }
 
-    protected MistralEmbeddingsModel(ModelConfigurations modelConfigurations, ModelSecrets modelSecrets) {
-        super(modelConfigurations, modelSecrets);
-        setPropertiesFromServiceSettings((MistralEmbeddingsServiceSettings) modelConfigurations.getServiceSettings());
-    }
-
     public MistralEmbeddingsModel(
         String inferenceEntityId,
         TaskType taskType,

From 0145a41ea503e3a396b9381d97f21e30b0a3d6e2 Mon Sep 17 00:00:00 2001
From: Artem Prigoda
Date: Wed, 19 Jun 2024 16:06:23 +0200
Subject: [PATCH 34/44] Log more data for PrevalidateShardPathIT#testCheckShards
 (#109887)

`PrevalidateShardPathIT#testCheckShards` keeps failing on CI, so we need
more logging.

* Add debug logging for `MasterService` events to verify that a cluster
  service update is triggered.
* Log the explanation for stuck shards regardless of whether they are
  located on node_2 in the cluster state.
See #104807

---
 muted-tests.yml                                                 | 4 +---
 .../elasticsearch/cluster/PrevalidateShardPathIT.java           | 6 +++++-
 2 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/muted-tests.yml b/muted-tests.yml
index 86ddd0b26e925..ef3c8188498a9 100644
--- a/muted-tests.yml
+++ b/muted-tests.yml
@@ -67,9 +67,6 @@ tests:
 - class: "org.elasticsearch.xpack.shutdown.NodeShutdownReadinessIT"
   issue: "https://github.com/elastic/elasticsearch/issues/109838"
   method: "testShutdownReadinessService"
-- class: "org.elasticsearch.cluster.PrevalidateShardPathIT"
-  issue: "https://github.com/elastic/elasticsearch/issues/104807"
-  method: "testCheckShards"
 - class: "org.elasticsearch.packaging.test.RpmPreservationTests"
   issue: "https://github.com/elastic/elasticsearch/issues/109898"
   method: "test30PreserveConfig"
@@ -82,6 +79,7 @@ tests:
   issue: "https://github.com/elastic/elasticsearch/issues/109915"
   method: "testRandomMultiValuesTopN"
 
+
 # Examples:
 #
 #  Mute a single test case in a YAML test suite:

diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/PrevalidateShardPathIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/PrevalidateShardPathIT.java
index ea566c90ad769..3ff7e66d25639 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/PrevalidateShardPathIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/PrevalidateShardPathIT.java
@@ -22,6 +22,7 @@
 import org.elasticsearch.common.xcontent.ChunkedToXContent;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.test.ESIntegTestCase;
+import org.elasticsearch.test.junit.annotations.TestLogging;
 
 import java.util.HashSet;
 import java.util.List;
@@ -39,6 +40,10 @@
 @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0)
 public class PrevalidateShardPathIT extends ESIntegTestCase {
 
+    @TestLogging(
+        value = "org.elasticsearch.cluster.service.MasterService:DEBUG",
+        reason = "https://github.com/elastic/elasticsearch/issues/104807"
+    )
     public void testCheckShards() throws Exception {
         internalCluster().startMasterOnlyNode();
         String node1 = internalCluster().startDataOnlyNode();
@@ -95,7 +100,6 @@ public void testCheckShards() throws Exception {
             .allShards()
             .filter(s -> s.getIndexName().equals(indexName))
             .filter(s -> node2ShardIds.contains(s.shardId()))
-            .filter(s -> s.currentNodeId().equals(node2Id))
             .toList();
         logger.info("Found {} shards on the relocation source node {} in the cluster state", node2Shards, node2Id);
         for (var node2Shard : node2Shards) {

From 2233349f7612c4ad284b66c9ab3be3e7c5787436 Mon Sep 17 00:00:00 2001
From: Iván Cea Fontenla
Date: Wed, 19 Jun 2024 16:48:45 +0200
Subject: [PATCH 35/44] ESQL: top_list aggregation (#109386)

Added a `top_list(field, limit, order)` aggregation that collects the top N
values per bucket. Works with the same types as MAX/MIN.

- Added the aggregation function
- Added a template to generate the aggregators
- Added a template to generate the `BucketedSort` implementations per type
  - This structure is based on the `BucketedSort` structure used in the
    original aggregations. It was modified to better fit the ESQL ecosystem
    (Block-based, no docs...)

Also added a guide to create aggregations.
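For illustration, a query of the shape exercised by the new
stats_top_list.csv-spec tests, where the third argument selects "asc" or
"desc" ordering (the index and field names here are invented):

    FROM employees
    | STATS top_salaries = TOP_LIST(salary, 3, "desc") BY country
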
Fixes https://github.com/elastic/elasticsearch/issues/109213 --- .gitattributes | 2 + docs/changelog/109386.yaml | 6 + .../compute/ann/GroupingAggregator.java | 4 + x-pack/plugin/esql/compute/build.gradle | 51 ++- .../aggregation/TopListDoubleAggregator.java | 137 +++++++ .../aggregation/TopListIntAggregator.java | 137 +++++++ .../aggregation/TopListLongAggregator.java | 137 +++++++ .../compute/data/sort/DoubleBucketedSort.java | 346 ++++++++++++++++ .../compute/data/sort/IntBucketedSort.java | 346 ++++++++++++++++ .../compute/data/sort/LongBucketedSort.java | 346 ++++++++++++++++ .../TopListDoubleAggregatorFunction.java | 126 ++++++ ...pListDoubleAggregatorFunctionSupplier.java | 45 +++ ...pListDoubleGroupingAggregatorFunction.java | 202 ++++++++++ .../TopListIntAggregatorFunction.java | 126 ++++++ .../TopListIntAggregatorFunctionSupplier.java | 45 +++ .../TopListIntGroupingAggregatorFunction.java | 200 ++++++++++ .../TopListLongAggregatorFunction.java | 126 ++++++ ...TopListLongAggregatorFunctionSupplier.java | 45 +++ ...TopListLongGroupingAggregatorFunction.java | 202 ++++++++++ .../compute/src/main/java/module-info.java | 1 + .../aggregation/X-TopListAggregator.java.st | 142 +++++++ .../compute/data/sort/X-BucketedSort.java.st | 350 +++++++++++++++++ .../TopListDoubleAggregatorFunctionTests.java | 44 +++ .../TopListIntAggregatorFunctionTests.java | 44 +++ .../TopListLongAggregatorFunctionTests.java | 44 +++ .../data/sort/BucketedSortTestCase.java | 368 ++++++++++++++++++ .../data/sort/DoubleBucketedSortTests.java | 58 +++ .../data/sort/IntBucketedSortTests.java | 58 +++ .../data/sort/LongBucketedSortTests.java | 59 +++ .../src/main/resources/meta.csv-spec | 18 +- .../main/resources/stats_top_list.csv-spec | 156 ++++++++ .../xpack/esql/action/EsqlCapabilities.java | 6 + .../function/EsqlFunctionRegistry.java | 2 + .../expression/function/aggregate/Max.java | 8 +- .../expression/function/aggregate/Min.java | 8 +- .../function/aggregate/NumericAggregate.java | 22 ++ .../function/aggregate/TopList.java | 181 +++++++++ .../function/aggregate/package-info.java | 176 +++++++++ .../xpack/esql/io/stream/PlanNamedTypes.java | 2 + .../xpack/esql/package-info.java | 1 + .../xpack/esql/planner/AggregateMapper.java | 6 +- 41 files changed, 4364 insertions(+), 19 deletions(-) create mode 100644 docs/changelog/109386.yaml create mode 100644 x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/TopListDoubleAggregator.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/TopListIntAggregator.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/TopListLongAggregator.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/sort/DoubleBucketedSort.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/sort/IntBucketedSort.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/sort/LongBucketedSort.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListDoubleAggregatorFunction.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListDoubleAggregatorFunctionSupplier.java create mode 100644 
x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListDoubleGroupingAggregatorFunction.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListIntAggregatorFunction.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListIntAggregatorFunctionSupplier.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListIntGroupingAggregatorFunction.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListLongAggregatorFunction.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListLongAggregatorFunctionSupplier.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListLongGroupingAggregatorFunction.java create mode 100644 x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-TopListAggregator.java.st create mode 100644 x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/sort/X-BucketedSort.java.st create mode 100644 x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/TopListDoubleAggregatorFunctionTests.java create mode 100644 x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/TopListIntAggregatorFunctionTests.java create mode 100644 x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/TopListLongAggregatorFunctionTests.java create mode 100644 x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/sort/BucketedSortTestCase.java create mode 100644 x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/sort/DoubleBucketedSortTests.java create mode 100644 x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/sort/IntBucketedSortTests.java create mode 100644 x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/sort/LongBucketedSortTests.java create mode 100644 x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_top_list.csv-spec create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/TopList.java create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/package-info.java diff --git a/.gitattributes b/.gitattributes index 6a8de5462ec3f..04881c92ede00 100644 --- a/.gitattributes +++ b/.gitattributes @@ -4,6 +4,8 @@ CHANGELOG.asciidoc merge=union # Windows build-tools-internal/src/test/resources/org/elasticsearch/gradle/internal/release/*.asciidoc text eol=lf +x-pack/plugin/esql/compute/src/main/generated/** linguist-generated=true +x-pack/plugin/esql/compute/src/main/generated-src/** linguist-generated=true x-pack/plugin/esql/src/main/antlr/*.tokens linguist-generated=true x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/*.interp linguist-generated=true x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseLexer*.java linguist-generated=true diff --git a/docs/changelog/109386.yaml b/docs/changelog/109386.yaml new file mode 100644 index 0000000000000..984ee96dde063 --- /dev/null +++ b/docs/changelog/109386.yaml @@ -0,0 +1,6 @@ +pr: 109386 +summary: "ESQL: `top_list` aggregation" +area: ES|QL +type: feature +issues: + - 109213 diff --git 
a/x-pack/plugin/esql/compute/ann/src/main/java/org/elasticsearch/compute/ann/GroupingAggregator.java b/x-pack/plugin/esql/compute/ann/src/main/java/org/elasticsearch/compute/ann/GroupingAggregator.java index 7e92fc5c2734e..0216ea07e5c7c 100644 --- a/x-pack/plugin/esql/compute/ann/src/main/java/org/elasticsearch/compute/ann/GroupingAggregator.java +++ b/x-pack/plugin/esql/compute/ann/src/main/java/org/elasticsearch/compute/ann/GroupingAggregator.java @@ -12,6 +12,10 @@ import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; +/** + * Annotates a class that implements an aggregation function with grouping. + * See {@link Aggregator} for more information. + */ @Target(ElementType.TYPE) @Retention(RetentionPolicy.SOURCE) public @interface GroupingAggregator { diff --git a/x-pack/plugin/esql/compute/build.gradle b/x-pack/plugin/esql/compute/build.gradle index 635a53d1ac98a..bc206ee1d78d6 100644 --- a/x-pack/plugin/esql/compute/build.gradle +++ b/x-pack/plugin/esql/compute/build.gradle @@ -36,10 +36,11 @@ spotless { } } -def prop(Type, type, TYPE, BYTES, Array, Hash) { +def prop(Type, type, Wrapper, TYPE, BYTES, Array, Hash) { return [ "Type" : Type, "type" : type, + "Wrapper": Wrapper, "TYPE" : TYPE, "BYTES" : BYTES, "Array" : Array, @@ -55,12 +56,13 @@ def prop(Type, type, TYPE, BYTES, Array, Hash) { } tasks.named('stringTemplates').configure { - var intProperties = prop("Int", "int", "INT", "Integer.BYTES", "IntArray", "LongHash") - var floatProperties = prop("Float", "float", "FLOAT", "Float.BYTES", "FloatArray", "LongHash") - var longProperties = prop("Long", "long", "LONG", "Long.BYTES", "LongArray", "LongHash") - var doubleProperties = prop("Double", "double", "DOUBLE", "Double.BYTES", "DoubleArray", "LongHash") - var bytesRefProperties = prop("BytesRef", "BytesRef", "BYTES_REF", "org.apache.lucene.util.RamUsageEstimator.NUM_BYTES_OBJECT_REF", "", "BytesRefHash") - var booleanProperties = prop("Boolean", "boolean", "BOOLEAN", "Byte.BYTES", "BitArray", "") + var intProperties = prop("Int", "int", "Integer", "INT", "Integer.BYTES", "IntArray", "LongHash") + var floatProperties = prop("Float", "float", "Float", "FLOAT", "Float.BYTES", "FloatArray", "LongHash") + var longProperties = prop("Long", "long", "Long", "LONG", "Long.BYTES", "LongArray", "LongHash") + var doubleProperties = prop("Double", "double", "Double", "DOUBLE", "Double.BYTES", "DoubleArray", "LongHash") + var bytesRefProperties = prop("BytesRef", "BytesRef", "", "BYTES_REF", "org.apache.lucene.util.RamUsageEstimator.NUM_BYTES_OBJECT_REF", "", "BytesRefHash") + var booleanProperties = prop("Boolean", "boolean", "Boolean", "BOOLEAN", "Byte.BYTES", "BitArray", "") + // primitive vectors File vectorInputFile = new File("${projectDir}/src/main/java/org/elasticsearch/compute/data/X-Vector.java.st") template { @@ -500,6 +502,24 @@ tasks.named('stringTemplates').configure { it.outputFile = "org/elasticsearch/compute/aggregation/RateDoubleAggregator.java" } + + File topListAggregatorInputFile = new File("${projectDir}/src/main/java/org/elasticsearch/compute/aggregation/X-TopListAggregator.java.st") + template { + it.properties = intProperties + it.inputFile = topListAggregatorInputFile + it.outputFile = "org/elasticsearch/compute/aggregation/TopListIntAggregator.java" + } + template { + it.properties = longProperties + it.inputFile = topListAggregatorInputFile + it.outputFile = "org/elasticsearch/compute/aggregation/TopListLongAggregator.java" + } + template { + it.properties = doubleProperties + it.inputFile = 
topListAggregatorInputFile + it.outputFile = "org/elasticsearch/compute/aggregation/TopListDoubleAggregator.java" + } + File multivalueDedupeInputFile = file("src/main/java/org/elasticsearch/compute/operator/mvdedupe/X-MultivalueDedupe.java.st") template { it.properties = intProperties @@ -635,4 +655,21 @@ tasks.named('stringTemplates').configure { it.inputFile = resultBuilderInputFile it.outputFile = "org/elasticsearch/compute/operator/topn/ResultBuilderForFloat.java" } + + File bucketedSortInputFile = new File("${projectDir}/src/main/java/org/elasticsearch/compute/data/sort/X-BucketedSort.java.st") + template { + it.properties = intProperties + it.inputFile = bucketedSortInputFile + it.outputFile = "org/elasticsearch/compute/data/sort/IntBucketedSort.java" + } + template { + it.properties = longProperties + it.inputFile = bucketedSortInputFile + it.outputFile = "org/elasticsearch/compute/data/sort/LongBucketedSort.java" + } + template { + it.properties = doubleProperties + it.inputFile = bucketedSortInputFile + it.outputFile = "org/elasticsearch/compute/data/sort/DoubleBucketedSort.java" + } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/TopListDoubleAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/TopListDoubleAggregator.java new file mode 100644 index 0000000000000..941722b4424d3 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/TopListDoubleAggregator.java @@ -0,0 +1,137 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.compute.ann.Aggregator; +import org.elasticsearch.compute.ann.GroupingAggregator; +import org.elasticsearch.compute.ann.IntermediateState; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.sort.DoubleBucketedSort; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.search.sort.SortOrder; + +/** + * Aggregates the top N field values for double. 
+ */ +@Aggregator({ @IntermediateState(name = "topList", type = "DOUBLE_BLOCK") }) +@GroupingAggregator +class TopListDoubleAggregator { + public static SingleState initSingle(BigArrays bigArrays, int limit, boolean ascending) { + return new SingleState(bigArrays, limit, ascending); + } + + public static void combine(SingleState state, double v) { + state.add(v); + } + + public static void combineIntermediate(SingleState state, DoubleBlock values) { + int start = values.getFirstValueIndex(0); + int end = start + values.getValueCount(0); + for (int i = start; i < end; i++) { + combine(state, values.getDouble(i)); + } + } + + public static Block evaluateFinal(SingleState state, DriverContext driverContext) { + return state.toBlock(driverContext.blockFactory()); + } + + public static GroupingState initGrouping(BigArrays bigArrays, int limit, boolean ascending) { + return new GroupingState(bigArrays, limit, ascending); + } + + public static void combine(GroupingState state, int groupId, double v) { + state.add(groupId, v); + } + + public static void combineIntermediate(GroupingState state, int groupId, DoubleBlock values, int valuesPosition) { + int start = values.getFirstValueIndex(valuesPosition); + int end = start + values.getValueCount(valuesPosition); + for (int i = start; i < end; i++) { + combine(state, groupId, values.getDouble(i)); + } + } + + public static void combineStates(GroupingState current, int groupId, GroupingState state, int statePosition) { + current.merge(groupId, state, statePosition); + } + + public static Block evaluateFinal(GroupingState state, IntVector selected, DriverContext driverContext) { + return state.toBlock(driverContext.blockFactory(), selected); + } + + public static class GroupingState implements Releasable { + private final DoubleBucketedSort sort; + + private GroupingState(BigArrays bigArrays, int limit, boolean ascending) { + this.sort = new DoubleBucketedSort(bigArrays, ascending ? 
SortOrder.ASC : SortOrder.DESC, limit); + } + + public void add(int groupId, double value) { + sort.collect(value, groupId); + } + + public void merge(int groupId, GroupingState other, int otherGroupId) { + sort.merge(groupId, other.sort, otherGroupId); + } + + void toIntermediate(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) { + blocks[offset] = toBlock(driverContext.blockFactory(), selected); + } + + Block toBlock(BlockFactory blockFactory, IntVector selected) { + return sort.toBlock(blockFactory, selected); + } + + void enableGroupIdTracking(SeenGroupIds seen) { + // we figure out seen values from nulls on the values block + } + + @Override + public void close() { + Releasables.closeExpectNoException(sort); + } + } + + public static class SingleState implements Releasable { + private final GroupingState internalState; + + private SingleState(BigArrays bigArrays, int limit, boolean ascending) { + this.internalState = new GroupingState(bigArrays, limit, ascending); + } + + public void add(double value) { + internalState.add(0, value); + } + + public void merge(GroupingState other) { + internalState.merge(0, other, 0); + } + + void toIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + blocks[offset] = toBlock(driverContext.blockFactory()); + } + + Block toBlock(BlockFactory blockFactory) { + try (var intValues = blockFactory.newConstantIntVector(0, 1)) { + return internalState.toBlock(blockFactory, intValues); + } + } + + @Override + public void close() { + Releasables.closeExpectNoException(internalState); + } + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/TopListIntAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/TopListIntAggregator.java new file mode 100644 index 0000000000000..dafbf1c2a3051 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/TopListIntAggregator.java @@ -0,0 +1,137 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.compute.ann.Aggregator; +import org.elasticsearch.compute.ann.GroupingAggregator; +import org.elasticsearch.compute.ann.IntermediateState; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.sort.IntBucketedSort; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.search.sort.SortOrder; + +/** + * Aggregates the top N field values for int. 
+ */ +@Aggregator({ @IntermediateState(name = "topList", type = "INT_BLOCK") }) +@GroupingAggregator +class TopListIntAggregator { + public static SingleState initSingle(BigArrays bigArrays, int limit, boolean ascending) { + return new SingleState(bigArrays, limit, ascending); + } + + public static void combine(SingleState state, int v) { + state.add(v); + } + + public static void combineIntermediate(SingleState state, IntBlock values) { + int start = values.getFirstValueIndex(0); + int end = start + values.getValueCount(0); + for (int i = start; i < end; i++) { + combine(state, values.getInt(i)); + } + } + + public static Block evaluateFinal(SingleState state, DriverContext driverContext) { + return state.toBlock(driverContext.blockFactory()); + } + + public static GroupingState initGrouping(BigArrays bigArrays, int limit, boolean ascending) { + return new GroupingState(bigArrays, limit, ascending); + } + + public static void combine(GroupingState state, int groupId, int v) { + state.add(groupId, v); + } + + public static void combineIntermediate(GroupingState state, int groupId, IntBlock values, int valuesPosition) { + int start = values.getFirstValueIndex(valuesPosition); + int end = start + values.getValueCount(valuesPosition); + for (int i = start; i < end; i++) { + combine(state, groupId, values.getInt(i)); + } + } + + public static void combineStates(GroupingState current, int groupId, GroupingState state, int statePosition) { + current.merge(groupId, state, statePosition); + } + + public static Block evaluateFinal(GroupingState state, IntVector selected, DriverContext driverContext) { + return state.toBlock(driverContext.blockFactory(), selected); + } + + public static class GroupingState implements Releasable { + private final IntBucketedSort sort; + + private GroupingState(BigArrays bigArrays, int limit, boolean ascending) { + this.sort = new IntBucketedSort(bigArrays, ascending ? 
SortOrder.ASC : SortOrder.DESC, limit); + } + + public void add(int groupId, int value) { + sort.collect(value, groupId); + } + + public void merge(int groupId, GroupingState other, int otherGroupId) { + sort.merge(groupId, other.sort, otherGroupId); + } + + void toIntermediate(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) { + blocks[offset] = toBlock(driverContext.blockFactory(), selected); + } + + Block toBlock(BlockFactory blockFactory, IntVector selected) { + return sort.toBlock(blockFactory, selected); + } + + void enableGroupIdTracking(SeenGroupIds seen) { + // we figure out seen values from nulls on the values block + } + + @Override + public void close() { + Releasables.closeExpectNoException(sort); + } + } + + public static class SingleState implements Releasable { + private final GroupingState internalState; + + private SingleState(BigArrays bigArrays, int limit, boolean ascending) { + this.internalState = new GroupingState(bigArrays, limit, ascending); + } + + public void add(int value) { + internalState.add(0, value); + } + + public void merge(GroupingState other) { + internalState.merge(0, other, 0); + } + + void toIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + blocks[offset] = toBlock(driverContext.blockFactory()); + } + + Block toBlock(BlockFactory blockFactory) { + try (var intValues = blockFactory.newConstantIntVector(0, 1)) { + return internalState.toBlock(blockFactory, intValues); + } + } + + @Override + public void close() { + Releasables.closeExpectNoException(internalState); + } + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/TopListLongAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/TopListLongAggregator.java new file mode 100644 index 0000000000000..c0e7122a4be0b --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/TopListLongAggregator.java @@ -0,0 +1,137 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.compute.ann.Aggregator; +import org.elasticsearch.compute.ann.GroupingAggregator; +import org.elasticsearch.compute.ann.IntermediateState; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.sort.LongBucketedSort; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.search.sort.SortOrder; + +/** + * Aggregates the top N field values for long. 
+ */ +@Aggregator({ @IntermediateState(name = "topList", type = "LONG_BLOCK") }) +@GroupingAggregator +class TopListLongAggregator { + public static SingleState initSingle(BigArrays bigArrays, int limit, boolean ascending) { + return new SingleState(bigArrays, limit, ascending); + } + + public static void combine(SingleState state, long v) { + state.add(v); + } + + public static void combineIntermediate(SingleState state, LongBlock values) { + int start = values.getFirstValueIndex(0); + int end = start + values.getValueCount(0); + for (int i = start; i < end; i++) { + combine(state, values.getLong(i)); + } + } + + public static Block evaluateFinal(SingleState state, DriverContext driverContext) { + return state.toBlock(driverContext.blockFactory()); + } + + public static GroupingState initGrouping(BigArrays bigArrays, int limit, boolean ascending) { + return new GroupingState(bigArrays, limit, ascending); + } + + public static void combine(GroupingState state, int groupId, long v) { + state.add(groupId, v); + } + + public static void combineIntermediate(GroupingState state, int groupId, LongBlock values, int valuesPosition) { + int start = values.getFirstValueIndex(valuesPosition); + int end = start + values.getValueCount(valuesPosition); + for (int i = start; i < end; i++) { + combine(state, groupId, values.getLong(i)); + } + } + + public static void combineStates(GroupingState current, int groupId, GroupingState state, int statePosition) { + current.merge(groupId, state, statePosition); + } + + public static Block evaluateFinal(GroupingState state, IntVector selected, DriverContext driverContext) { + return state.toBlock(driverContext.blockFactory(), selected); + } + + public static class GroupingState implements Releasable { + private final LongBucketedSort sort; + + private GroupingState(BigArrays bigArrays, int limit, boolean ascending) { + this.sort = new LongBucketedSort(bigArrays, ascending ? 
SortOrder.ASC : SortOrder.DESC, limit); + } + + public void add(int groupId, long value) { + sort.collect(value, groupId); + } + + public void merge(int groupId, GroupingState other, int otherGroupId) { + sort.merge(groupId, other.sort, otherGroupId); + } + + void toIntermediate(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) { + blocks[offset] = toBlock(driverContext.blockFactory(), selected); + } + + Block toBlock(BlockFactory blockFactory, IntVector selected) { + return sort.toBlock(blockFactory, selected); + } + + void enableGroupIdTracking(SeenGroupIds seen) { + // we figure out seen values from nulls on the values block + } + + @Override + public void close() { + Releasables.closeExpectNoException(sort); + } + } + + public static class SingleState implements Releasable { + private final GroupingState internalState; + + private SingleState(BigArrays bigArrays, int limit, boolean ascending) { + this.internalState = new GroupingState(bigArrays, limit, ascending); + } + + public void add(long value) { + internalState.add(0, value); + } + + public void merge(GroupingState other) { + internalState.merge(0, other, 0); + } + + void toIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + blocks[offset] = toBlock(driverContext.blockFactory()); + } + + Block toBlock(BlockFactory blockFactory) { + try (var intValues = blockFactory.newConstantIntVector(0, 1)) { + return internalState.toBlock(blockFactory, intValues); + } + } + + @Override + public void close() { + Releasables.closeExpectNoException(internalState); + } + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/sort/DoubleBucketedSort.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/sort/DoubleBucketedSort.java new file mode 100644 index 0000000000000..63318a2189908 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/sort/DoubleBucketedSort.java @@ -0,0 +1,346 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.data.sort; + +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.BitArray; +import org.elasticsearch.common.util.DoubleArray; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.core.Tuple; +import org.elasticsearch.search.sort.BucketedSort; +import org.elasticsearch.search.sort.SortOrder; + +import java.util.Arrays; +import java.util.stream.IntStream; + +/** + * Aggregates the top N double values per bucket. + * See {@link BucketedSort} for more information. + * This class is generated. Edit @{code X-BucketedSort.java.st} instead of this file. + */ +public class DoubleBucketedSort implements Releasable { + + private final BigArrays bigArrays; + private final SortOrder order; + private final int bucketSize; + /** + * {@code true} if the bucket is in heap mode, {@code false} if + * it is still gathering. + */ + private final BitArray heapMode; + /** + * An array containing all the values on all buckets. The structure is as follows: + *
+     * <p>
+     *     For each bucket, there are bucketSize elements, based on the bucket id (0, 1, 2...).
+     *     Then, for each bucket, it can be in 2 states:
+     * </p>
+     * <ul>
+     *     <li>
+     *         Gather mode: All buckets start in gather mode, and remain here while they have less than bucketSize elements.
+     *         In gather mode, the elements are stored in the array from the highest index to the lowest index.
+     *         The lowest index contains the offset to the next slot to be filled.
+     *         <p>
+     *             This allows us to insert elements in O(1) time.
+     *         </p>
+     *         <p>
+     *             When the bucketSize-th element is collected, the bucket transitions to heap mode, by heapifying its contents.
+     *         </p>
+     *     </li>
+     *     <li>
+     *         Heap mode: The bucket slots are organized as a min heap structure.
+     *         <p>
+     *             The root of the heap is the minimum value in the bucket,
+     *             which allows us to quickly discard new values that are not in the top N.
+     *         </p>
+     *     </li>
+     * </ul>
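+     * <p>
+     *     A sketch of the layout, assuming {@code bucketSize == 3} (the values are illustrative):
+     *     a bucket that has gathered {@code 5.0} and then {@code 2.0} is stored as
+     *     {@code [0, 2.0, 5.0]}, where slot 0 holds the next gather offset. Collecting a third
+     *     value writes it to slot 0 and heapifies the bucket in place.
+     * </p>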
    + */ + private DoubleArray values; + + public DoubleBucketedSort(BigArrays bigArrays, SortOrder order, int bucketSize) { + this.bigArrays = bigArrays; + this.order = order; + this.bucketSize = bucketSize; + heapMode = new BitArray(0, bigArrays); + + boolean success = false; + try { + values = bigArrays.newDoubleArray(0, false); + success = true; + } finally { + if (success == false) { + close(); + } + } + } + + /** + * Collects a {@code value} into a {@code bucket}. + *
+     * <p>
+     *     It may or may not be inserted in the heap, depending on whether it is better than the current root.
+     * </p>
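+     * <p>
+     *     A minimal usage sketch ({@code bigArrays} is assumed to be in scope):
+     * </p>
+     * <pre>{@code
+     * try (DoubleBucketedSort sort = new DoubleBucketedSort(bigArrays, SortOrder.DESC, 3)) {
+     *     sort.collect(5.0, 0); // bucket 0 gathers 5.0
+     *     sort.collect(2.0, 0); // bucket 0 gathers 2.0
+     *     sort.collect(9.0, 0); // third value: bucket 0 transitions to heap mode
+     *     sort.collect(1.0, 0); // not among the top 3 descending values, so it is discarded
+     * }
+     * }</pre>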
    + */ + public void collect(double value, int bucket) { + long rootIndex = (long) bucket * bucketSize; + if (inHeapMode(bucket)) { + if (betterThan(value, values.get(rootIndex))) { + values.set(rootIndex, value); + downHeap(rootIndex, 0); + } + return; + } + // Gathering mode + long requiredSize = rootIndex + bucketSize; + if (values.size() < requiredSize) { + grow(requiredSize); + } + int next = getNextGatherOffset(rootIndex); + assert 0 <= next && next < bucketSize + : "Expected next to be in the range of valid buckets [0 <= " + next + " < " + bucketSize + "]"; + long index = next + rootIndex; + values.set(index, value); + if (next == 0) { + heapMode.set(bucket); + heapify(rootIndex); + } else { + setNextGatherOffset(rootIndex, next - 1); + } + } + + /** + * The order of the sort. + */ + public SortOrder getOrder() { + return order; + } + + /** + * The number of values to store per bucket. + */ + public int getBucketSize() { + return bucketSize; + } + + /** + * Get the first and last indexes (inclusive, exclusive) of the values for a bucket. + * Returns [0, 0] if the bucket has never been collected. + */ + private Tuple getBucketValuesIndexes(int bucket) { + long rootIndex = (long) bucket * bucketSize; + if (rootIndex >= values.size()) { + // We've never seen this bucket. + return Tuple.tuple(0L, 0L); + } + long start = inHeapMode(bucket) ? rootIndex : (rootIndex + getNextGatherOffset(rootIndex) + 1); + long end = rootIndex + bucketSize; + return Tuple.tuple(start, end); + } + + /** + * Merge the values from {@code other}'s {@code otherGroupId} into {@code groupId}. + */ + public void merge(int groupId, DoubleBucketedSort other, int otherGroupId) { + var otherBounds = other.getBucketValuesIndexes(otherGroupId); + + // TODO: This can be improved for heapified buckets by making use of the heap structures + for (long i = otherBounds.v1(); i < otherBounds.v2(); i++) { + collect(other.values.get(i), groupId); + } + } + + /** + * Creates a block with the values from the {@code selected} groups. + */ + public Block toBlock(BlockFactory blockFactory, IntVector selected) { + // Check if the selected groups are all empty, to avoid allocating extra memory + if (IntStream.range(0, selected.getPositionCount()).map(selected::getInt).noneMatch(bucket -> { + var bounds = this.getBucketValuesIndexes(bucket); + var size = bounds.v2() - bounds.v1(); + + return size > 0; + })) { + return blockFactory.newConstantNullBlock(selected.getPositionCount()); + } + + // Used to sort the values in the bucket. 
+ var bucketValues = new double[bucketSize]; + + try (var builder = blockFactory.newDoubleBlockBuilder(selected.getPositionCount())) { + for (int s = 0; s < selected.getPositionCount(); s++) { + int bucket = selected.getInt(s); + + var bounds = getBucketValuesIndexes(bucket); + var size = bounds.v2() - bounds.v1(); + + if (size == 0) { + builder.appendNull(); + continue; + } + + if (size == 1) { + builder.appendDouble(values.get(bounds.v1())); + continue; + } + + for (int i = 0; i < size; i++) { + bucketValues[i] = values.get(bounds.v1() + i); + } + + // TODO: Make use of heap structures to faster iterate in order instead of copying and sorting + Arrays.sort(bucketValues, 0, (int) size); + + builder.beginPositionEntry(); + if (order == SortOrder.ASC) { + for (int i = 0; i < size; i++) { + builder.appendDouble(bucketValues[i]); + } + } else { + for (int i = (int) size - 1; i >= 0; i--) { + builder.appendDouble(bucketValues[i]); + } + } + builder.endPositionEntry(); + } + return builder.build(); + } + } + + /** + * Is this bucket a min heap {@code true} or in gathering mode {@code false}? + */ + private boolean inHeapMode(int bucket) { + return heapMode.get(bucket); + } + + /** + * Get the next index that should be "gathered" for a bucket rooted + * at {@code rootIndex}. + */ + private int getNextGatherOffset(long rootIndex) { + return (int) values.get(rootIndex); + } + + /** + * Set the next index that should be "gathered" for a bucket rooted + * at {@code rootIndex}. + */ + private void setNextGatherOffset(long rootIndex, int offset) { + values.set(rootIndex, offset); + } + + /** + * {@code true} if the entry at index {@code lhs} is "better" than + * the entry at {@code rhs}. "Better" in this means "lower" for + * {@link SortOrder#ASC} and "higher" for {@link SortOrder#DESC}. + */ + private boolean betterThan(double lhs, double rhs) { + return getOrder().reverseMul() * Double.compare(lhs, rhs) < 0; + } + + /** + * Swap the data at two indices. + */ + private void swap(long lhs, long rhs) { + var tmp = values.get(lhs); + values.set(lhs, values.get(rhs)); + values.set(rhs, tmp); + } + + /** + * Allocate storage for more buckets and store the "next gather offset" + * for those new buckets. + */ + private void grow(long minSize) { + long oldMax = values.size(); + values = bigArrays.grow(values, minSize); + // Set the next gather offsets for all newly allocated buckets. + setNextGatherOffsets(oldMax - (oldMax % getBucketSize())); + } + + /** + * Maintain the "next gather offsets" for newly allocated buckets. + */ + private void setNextGatherOffsets(long startingAt) { + int nextOffset = getBucketSize() - 1; + for (long bucketRoot = startingAt; bucketRoot < values.size(); bucketRoot += getBucketSize()) { + setNextGatherOffset(bucketRoot, nextOffset); + } + } + + /** + * Heapify a bucket whose entries are in random order. + *
+     * <p>
+     *     This works by validating the heap property on each node, iterating
+     *     "upwards", pushing any out of order parents "down". Check out the
+     *     <a href="https://en.wikipedia.org/wiki/Binary_heap#Building_a_heap">wikipedia</a>
+     *     entry on binary heaps for more about this.
+     * </p>
+     * <p>
+     *     While this *looks* like it could easily be {@code O(n * log n)}, it is
+     *     a fairly well studied algorithm attributed to Floyd. There's
+     *     been a bunch of work that puts this at {@code O(n)}, close to 1.88n worst
+     *     case.
+     * </p>
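+     * <p>
+     *     Concretely, with {@code bucketSize == 7} only the parents at offsets 2, 1 and 0 are
+     *     pushed down; the four leaves are never visited, which is where the linear bound comes from.
+     * </p>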
    + * + * @param rootIndex the index the start of the bucket + */ + private void heapify(long rootIndex) { + int maxParent = bucketSize / 2 - 1; + for (int parent = maxParent; parent >= 0; parent--) { + downHeap(rootIndex, parent); + } + } + + /** + * Correct the heap invariant of a parent and its children. This + * runs in {@code O(log n)} time. + * @param rootIndex index of the start of the bucket + * @param parent Index within the bucket of the parent to check. + * For example, 0 is the "root". + */ + private void downHeap(long rootIndex, int parent) { + while (true) { + long parentIndex = rootIndex + parent; + int worst = parent; + long worstIndex = parentIndex; + int leftChild = parent * 2 + 1; + long leftIndex = rootIndex + leftChild; + if (leftChild < bucketSize) { + if (betterThan(values.get(worstIndex), values.get(leftIndex))) { + worst = leftChild; + worstIndex = leftIndex; + } + int rightChild = leftChild + 1; + long rightIndex = rootIndex + rightChild; + if (rightChild < bucketSize && betterThan(values.get(worstIndex), values.get(rightIndex))) { + worst = rightChild; + worstIndex = rightIndex; + } + } + if (worst == parent) { + break; + } + swap(worstIndex, parentIndex); + parent = worst; + } + } + + @Override + public final void close() { + Releasables.close(values, heapMode); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/sort/IntBucketedSort.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/sort/IntBucketedSort.java new file mode 100644 index 0000000000000..04a635d75fe52 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/sort/IntBucketedSort.java @@ -0,0 +1,346 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.data.sort; + +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.BitArray; +import org.elasticsearch.common.util.IntArray; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.core.Tuple; +import org.elasticsearch.search.sort.BucketedSort; +import org.elasticsearch.search.sort.SortOrder; + +import java.util.Arrays; +import java.util.stream.IntStream; + +/** + * Aggregates the top N int values per bucket. + * See {@link BucketedSort} for more information. + * This class is generated. Edit @{code X-BucketedSort.java.st} instead of this file. + */ +public class IntBucketedSort implements Releasable { + + private final BigArrays bigArrays; + private final SortOrder order; + private final int bucketSize; + /** + * {@code true} if the bucket is in heap mode, {@code false} if + * it is still gathering. + */ + private final BitArray heapMode; + /** + * An array containing all the values on all buckets. The structure is as follows: + *
+     * <p>
+     *     For each bucket, there are bucketSize elements, based on the bucket id (0, 1, 2...).
+     *     Then, for each bucket, it can be in 2 states:
+     * </p>
+     * <ul>
+     *     <li>
+     *         Gather mode: All buckets start in gather mode, and remain here while they have less than bucketSize elements.
+     *         In gather mode, the elements are stored in the array from the highest index to the lowest index.
+     *         The lowest index contains the offset to the next slot to be filled.
+     *         <p>
+     *             This allows us to insert elements in O(1) time.
+     *         </p>
+     *         <p>
+     *             When the bucketSize-th element is collected, the bucket transitions to heap mode, by heapifying its contents.
+     *         </p>
+     *     </li>
+     *     <li>
+     *         Heap mode: The bucket slots are organized as a min heap structure.
+     *         <p>
+     *             The root of the heap is the minimum value in the bucket,
+     *             which allows us to quickly discard new values that are not in the top N.
+     *         </p>
+     *     </li>
+     * </ul>
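+     * <p>
+     *     A sketch of the layout, assuming {@code bucketSize == 3} (the values are illustrative):
+     *     a bucket that has gathered {@code 5} and then {@code 2} is stored as {@code [0, 2, 5]},
+     *     where slot 0 holds the next gather offset, stored directly as an {@code int} in this
+     *     specialization. Collecting a third value writes it to slot 0 and heapifies the bucket in place.
+     * </p>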
    + */ + private IntArray values; + + public IntBucketedSort(BigArrays bigArrays, SortOrder order, int bucketSize) { + this.bigArrays = bigArrays; + this.order = order; + this.bucketSize = bucketSize; + heapMode = new BitArray(0, bigArrays); + + boolean success = false; + try { + values = bigArrays.newIntArray(0, false); + success = true; + } finally { + if (success == false) { + close(); + } + } + } + + /** + * Collects a {@code value} into a {@code bucket}. + *
+     * <p>
+     *     It may or may not be inserted in the heap, depending on whether it is better than the current root.
+     * </p>
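+     * <p>
+     *     For example, with {@code SortOrder.ASC} and {@code bucketSize == 3}, collecting
+     *     {@code 5, 2, 9, 7} into one bucket keeps {@code 2, 5, 7}: once the bucket is a heap,
+     *     the root {@code 9} (the worst of the kept values) is replaced by the better {@code 7}.
+     * </p>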
    + */ + public void collect(int value, int bucket) { + long rootIndex = (long) bucket * bucketSize; + if (inHeapMode(bucket)) { + if (betterThan(value, values.get(rootIndex))) { + values.set(rootIndex, value); + downHeap(rootIndex, 0); + } + return; + } + // Gathering mode + long requiredSize = rootIndex + bucketSize; + if (values.size() < requiredSize) { + grow(requiredSize); + } + int next = getNextGatherOffset(rootIndex); + assert 0 <= next && next < bucketSize + : "Expected next to be in the range of valid buckets [0 <= " + next + " < " + bucketSize + "]"; + long index = next + rootIndex; + values.set(index, value); + if (next == 0) { + heapMode.set(bucket); + heapify(rootIndex); + } else { + setNextGatherOffset(rootIndex, next - 1); + } + } + + /** + * The order of the sort. + */ + public SortOrder getOrder() { + return order; + } + + /** + * The number of values to store per bucket. + */ + public int getBucketSize() { + return bucketSize; + } + + /** + * Get the first and last indexes (inclusive, exclusive) of the values for a bucket. + * Returns [0, 0] if the bucket has never been collected. + */ + private Tuple getBucketValuesIndexes(int bucket) { + long rootIndex = (long) bucket * bucketSize; + if (rootIndex >= values.size()) { + // We've never seen this bucket. + return Tuple.tuple(0L, 0L); + } + long start = inHeapMode(bucket) ? rootIndex : (rootIndex + getNextGatherOffset(rootIndex) + 1); + long end = rootIndex + bucketSize; + return Tuple.tuple(start, end); + } + + /** + * Merge the values from {@code other}'s {@code otherGroupId} into {@code groupId}. + */ + public void merge(int groupId, IntBucketedSort other, int otherGroupId) { + var otherBounds = other.getBucketValuesIndexes(otherGroupId); + + // TODO: This can be improved for heapified buckets by making use of the heap structures + for (long i = otherBounds.v1(); i < otherBounds.v2(); i++) { + collect(other.values.get(i), groupId); + } + } + + /** + * Creates a block with the values from the {@code selected} groups. + */ + public Block toBlock(BlockFactory blockFactory, IntVector selected) { + // Check if the selected groups are all empty, to avoid allocating extra memory + if (IntStream.range(0, selected.getPositionCount()).map(selected::getInt).noneMatch(bucket -> { + var bounds = this.getBucketValuesIndexes(bucket); + var size = bounds.v2() - bounds.v1(); + + return size > 0; + })) { + return blockFactory.newConstantNullBlock(selected.getPositionCount()); + } + + // Used to sort the values in the bucket. 
+ var bucketValues = new int[bucketSize]; + + try (var builder = blockFactory.newIntBlockBuilder(selected.getPositionCount())) { + for (int s = 0; s < selected.getPositionCount(); s++) { + int bucket = selected.getInt(s); + + var bounds = getBucketValuesIndexes(bucket); + var size = bounds.v2() - bounds.v1(); + + if (size == 0) { + builder.appendNull(); + continue; + } + + if (size == 1) { + builder.appendInt(values.get(bounds.v1())); + continue; + } + + for (int i = 0; i < size; i++) { + bucketValues[i] = values.get(bounds.v1() + i); + } + + // TODO: Make use of heap structures to faster iterate in order instead of copying and sorting + Arrays.sort(bucketValues, 0, (int) size); + + builder.beginPositionEntry(); + if (order == SortOrder.ASC) { + for (int i = 0; i < size; i++) { + builder.appendInt(bucketValues[i]); + } + } else { + for (int i = (int) size - 1; i >= 0; i--) { + builder.appendInt(bucketValues[i]); + } + } + builder.endPositionEntry(); + } + return builder.build(); + } + } + + /** + * Is this bucket a min heap {@code true} or in gathering mode {@code false}? + */ + private boolean inHeapMode(int bucket) { + return heapMode.get(bucket); + } + + /** + * Get the next index that should be "gathered" for a bucket rooted + * at {@code rootIndex}. + */ + private int getNextGatherOffset(long rootIndex) { + return values.get(rootIndex); + } + + /** + * Set the next index that should be "gathered" for a bucket rooted + * at {@code rootIndex}. + */ + private void setNextGatherOffset(long rootIndex, int offset) { + values.set(rootIndex, offset); + } + + /** + * {@code true} if the entry at index {@code lhs} is "better" than + * the entry at {@code rhs}. "Better" in this means "lower" for + * {@link SortOrder#ASC} and "higher" for {@link SortOrder#DESC}. + */ + private boolean betterThan(int lhs, int rhs) { + return getOrder().reverseMul() * Integer.compare(lhs, rhs) < 0; + } + + /** + * Swap the data at two indices. + */ + private void swap(long lhs, long rhs) { + var tmp = values.get(lhs); + values.set(lhs, values.get(rhs)); + values.set(rhs, tmp); + } + + /** + * Allocate storage for more buckets and store the "next gather offset" + * for those new buckets. + */ + private void grow(long minSize) { + long oldMax = values.size(); + values = bigArrays.grow(values, minSize); + // Set the next gather offsets for all newly allocated buckets. + setNextGatherOffsets(oldMax - (oldMax % getBucketSize())); + } + + /** + * Maintain the "next gather offsets" for newly allocated buckets. + */ + private void setNextGatherOffsets(long startingAt) { + int nextOffset = getBucketSize() - 1; + for (long bucketRoot = startingAt; bucketRoot < values.size(); bucketRoot += getBucketSize()) { + setNextGatherOffset(bucketRoot, nextOffset); + } + } + + /** + * Heapify a bucket whose entries are in random order. + *
+     * <p>
+     *     This works by validating the heap property on each node, iterating
+     *     "upwards", pushing any out of order parents "down". Check out the
+     *     <a href="https://en.wikipedia.org/wiki/Binary_heap#Building_a_heap">wikipedia</a>
+     *     entry on binary heaps for more about this.
+     * </p>
+     * <p>
+     *     While this *looks* like it could easily be {@code O(n * log n)}, it is
+     *     a fairly well studied algorithm attributed to Floyd. There's
+     *     been a bunch of work that puts this at {@code O(n)}, close to 1.88n worst
+     *     case.
+     * </p>
    + * + * @param rootIndex the index the start of the bucket + */ + private void heapify(long rootIndex) { + int maxParent = bucketSize / 2 - 1; + for (int parent = maxParent; parent >= 0; parent--) { + downHeap(rootIndex, parent); + } + } + + /** + * Correct the heap invariant of a parent and its children. This + * runs in {@code O(log n)} time. + * @param rootIndex index of the start of the bucket + * @param parent Index within the bucket of the parent to check. + * For example, 0 is the "root". + */ + private void downHeap(long rootIndex, int parent) { + while (true) { + long parentIndex = rootIndex + parent; + int worst = parent; + long worstIndex = parentIndex; + int leftChild = parent * 2 + 1; + long leftIndex = rootIndex + leftChild; + if (leftChild < bucketSize) { + if (betterThan(values.get(worstIndex), values.get(leftIndex))) { + worst = leftChild; + worstIndex = leftIndex; + } + int rightChild = leftChild + 1; + long rightIndex = rootIndex + rightChild; + if (rightChild < bucketSize && betterThan(values.get(worstIndex), values.get(rightIndex))) { + worst = rightChild; + worstIndex = rightIndex; + } + } + if (worst == parent) { + break; + } + swap(worstIndex, parentIndex); + parent = worst; + } + } + + @Override + public final void close() { + Releasables.close(values, heapMode); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/sort/LongBucketedSort.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/sort/LongBucketedSort.java new file mode 100644 index 0000000000000..e08c25256944b --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/sort/LongBucketedSort.java @@ -0,0 +1,346 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.data.sort; + +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.BitArray; +import org.elasticsearch.common.util.LongArray; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.core.Tuple; +import org.elasticsearch.search.sort.BucketedSort; +import org.elasticsearch.search.sort.SortOrder; + +import java.util.Arrays; +import java.util.stream.IntStream; + +/** + * Aggregates the top N long values per bucket. + * See {@link BucketedSort} for more information. + * This class is generated. Edit @{code X-BucketedSort.java.st} instead of this file. + */ +public class LongBucketedSort implements Releasable { + + private final BigArrays bigArrays; + private final SortOrder order; + private final int bucketSize; + /** + * {@code true} if the bucket is in heap mode, {@code false} if + * it is still gathering. + */ + private final BitArray heapMode; + /** + * An array containing all the values on all buckets. The structure is as follows: + *
+     * <p>
+     *     For each bucket, there are bucketSize elements, based on the bucket id (0, 1, 2...).
+     *     Then, for each bucket, it can be in 2 states:
+     * </p>
+     * <ul>
+     *     <li>
+     *         Gather mode: All buckets start in gather mode, and remain here while they have less than bucketSize elements.
+     *         In gather mode, the elements are stored in the array from the highest index to the lowest index.
+     *         The lowest index contains the offset to the next slot to be filled.
+     *         <p>
+     *             This allows us to insert elements in O(1) time.
+     *         </p>
+     *         <p>
+     *             When the bucketSize-th element is collected, the bucket transitions to heap mode, by heapifying its contents.
+     *         </p>
+     *     </li>
+     *     <li>
+     *         Heap mode: The bucket slots are organized as a min heap structure.
+     *         <p>
+     *             The root of the heap is the minimum value in the bucket,
+     *             which allows us to quickly discard new values that are not in the top N.
+     *         </p>
+     *     </li>
+     * </ul>
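+     * <p>
+     *     A sketch of the layout, assuming {@code bucketSize == 3} (the values are illustrative):
+     *     a bucket that has gathered {@code 5L} and then {@code 2L} is stored as {@code [0, 2, 5]},
+     *     where slot 0 holds the next gather offset, narrowed back to an {@code int} when read.
+     *     Collecting a third value writes it to slot 0 and heapifies the bucket in place.
+     * </p>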
    + */ + private LongArray values; + + public LongBucketedSort(BigArrays bigArrays, SortOrder order, int bucketSize) { + this.bigArrays = bigArrays; + this.order = order; + this.bucketSize = bucketSize; + heapMode = new BitArray(0, bigArrays); + + boolean success = false; + try { + values = bigArrays.newLongArray(0, false); + success = true; + } finally { + if (success == false) { + close(); + } + } + } + + /** + * Collects a {@code value} into a {@code bucket}. + *
+     * <p>
+     *     It may or may not be inserted in the heap, depending on whether it is better than the current root.
+     * </p>
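+     * <p>
+     *     Note that {@code merge(groupId, other, otherGroupId)} re-collects the other bucket's
+     *     values through this method, so the same top-N filtering applies while merging.
+     * </p>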
    + */ + public void collect(long value, int bucket) { + long rootIndex = (long) bucket * bucketSize; + if (inHeapMode(bucket)) { + if (betterThan(value, values.get(rootIndex))) { + values.set(rootIndex, value); + downHeap(rootIndex, 0); + } + return; + } + // Gathering mode + long requiredSize = rootIndex + bucketSize; + if (values.size() < requiredSize) { + grow(requiredSize); + } + int next = getNextGatherOffset(rootIndex); + assert 0 <= next && next < bucketSize + : "Expected next to be in the range of valid buckets [0 <= " + next + " < " + bucketSize + "]"; + long index = next + rootIndex; + values.set(index, value); + if (next == 0) { + heapMode.set(bucket); + heapify(rootIndex); + } else { + setNextGatherOffset(rootIndex, next - 1); + } + } + + /** + * The order of the sort. + */ + public SortOrder getOrder() { + return order; + } + + /** + * The number of values to store per bucket. + */ + public int getBucketSize() { + return bucketSize; + } + + /** + * Get the first and last indexes (inclusive, exclusive) of the values for a bucket. + * Returns [0, 0] if the bucket has never been collected. + */ + private Tuple getBucketValuesIndexes(int bucket) { + long rootIndex = (long) bucket * bucketSize; + if (rootIndex >= values.size()) { + // We've never seen this bucket. + return Tuple.tuple(0L, 0L); + } + long start = inHeapMode(bucket) ? rootIndex : (rootIndex + getNextGatherOffset(rootIndex) + 1); + long end = rootIndex + bucketSize; + return Tuple.tuple(start, end); + } + + /** + * Merge the values from {@code other}'s {@code otherGroupId} into {@code groupId}. + */ + public void merge(int groupId, LongBucketedSort other, int otherGroupId) { + var otherBounds = other.getBucketValuesIndexes(otherGroupId); + + // TODO: This can be improved for heapified buckets by making use of the heap structures + for (long i = otherBounds.v1(); i < otherBounds.v2(); i++) { + collect(other.values.get(i), groupId); + } + } + + /** + * Creates a block with the values from the {@code selected} groups. + */ + public Block toBlock(BlockFactory blockFactory, IntVector selected) { + // Check if the selected groups are all empty, to avoid allocating extra memory + if (IntStream.range(0, selected.getPositionCount()).map(selected::getInt).noneMatch(bucket -> { + var bounds = this.getBucketValuesIndexes(bucket); + var size = bounds.v2() - bounds.v1(); + + return size > 0; + })) { + return blockFactory.newConstantNullBlock(selected.getPositionCount()); + } + + // Used to sort the values in the bucket. 
+ var bucketValues = new long[bucketSize]; + + try (var builder = blockFactory.newLongBlockBuilder(selected.getPositionCount())) { + for (int s = 0; s < selected.getPositionCount(); s++) { + int bucket = selected.getInt(s); + + var bounds = getBucketValuesIndexes(bucket); + var size = bounds.v2() - bounds.v1(); + + if (size == 0) { + builder.appendNull(); + continue; + } + + if (size == 1) { + builder.appendLong(values.get(bounds.v1())); + continue; + } + + for (int i = 0; i < size; i++) { + bucketValues[i] = values.get(bounds.v1() + i); + } + + // TODO: Make use of heap structures to faster iterate in order instead of copying and sorting + Arrays.sort(bucketValues, 0, (int) size); + + builder.beginPositionEntry(); + if (order == SortOrder.ASC) { + for (int i = 0; i < size; i++) { + builder.appendLong(bucketValues[i]); + } + } else { + for (int i = (int) size - 1; i >= 0; i--) { + builder.appendLong(bucketValues[i]); + } + } + builder.endPositionEntry(); + } + return builder.build(); + } + } + + /** + * Is this bucket a min heap {@code true} or in gathering mode {@code false}? + */ + private boolean inHeapMode(int bucket) { + return heapMode.get(bucket); + } + + /** + * Get the next index that should be "gathered" for a bucket rooted + * at {@code rootIndex}. + */ + private int getNextGatherOffset(long rootIndex) { + return (int) values.get(rootIndex); + } + + /** + * Set the next index that should be "gathered" for a bucket rooted + * at {@code rootIndex}. + */ + private void setNextGatherOffset(long rootIndex, int offset) { + values.set(rootIndex, offset); + } + + /** + * {@code true} if the entry at index {@code lhs} is "better" than + * the entry at {@code rhs}. "Better" in this means "lower" for + * {@link SortOrder#ASC} and "higher" for {@link SortOrder#DESC}. + */ + private boolean betterThan(long lhs, long rhs) { + return getOrder().reverseMul() * Long.compare(lhs, rhs) < 0; + } + + /** + * Swap the data at two indices. + */ + private void swap(long lhs, long rhs) { + var tmp = values.get(lhs); + values.set(lhs, values.get(rhs)); + values.set(rhs, tmp); + } + + /** + * Allocate storage for more buckets and store the "next gather offset" + * for those new buckets. + */ + private void grow(long minSize) { + long oldMax = values.size(); + values = bigArrays.grow(values, minSize); + // Set the next gather offsets for all newly allocated buckets. + setNextGatherOffsets(oldMax - (oldMax % getBucketSize())); + } + + /** + * Maintain the "next gather offsets" for newly allocated buckets. + */ + private void setNextGatherOffsets(long startingAt) { + int nextOffset = getBucketSize() - 1; + for (long bucketRoot = startingAt; bucketRoot < values.size(); bucketRoot += getBucketSize()) { + setNextGatherOffset(bucketRoot, nextOffset); + } + } + + /** + * Heapify a bucket whose entries are in random order. + *
+     * <p>
+     *     This works by validating the heap property on each node, iterating
+     *     "upwards", pushing any out of order parents "down". Check out the
+     *     <a href="https://en.wikipedia.org/wiki/Binary_heap#Building_a_heap">wikipedia</a>
+     *     entry on binary heaps for more about this.
+     * </p>
+     * <p>
+     *     While this *looks* like it could easily be {@code O(n * log n)}, it is
+     *     a fairly well studied algorithm attributed to Floyd. There's
+     *     been a bunch of work that puts this at {@code O(n)}, close to 1.88n worst
+     *     case.
+     * </p>
    + * + * @param rootIndex the index the start of the bucket + */ + private void heapify(long rootIndex) { + int maxParent = bucketSize / 2 - 1; + for (int parent = maxParent; parent >= 0; parent--) { + downHeap(rootIndex, parent); + } + } + + /** + * Correct the heap invariant of a parent and its children. This + * runs in {@code O(log n)} time. + * @param rootIndex index of the start of the bucket + * @param parent Index within the bucket of the parent to check. + * For example, 0 is the "root". + */ + private void downHeap(long rootIndex, int parent) { + while (true) { + long parentIndex = rootIndex + parent; + int worst = parent; + long worstIndex = parentIndex; + int leftChild = parent * 2 + 1; + long leftIndex = rootIndex + leftChild; + if (leftChild < bucketSize) { + if (betterThan(values.get(worstIndex), values.get(leftIndex))) { + worst = leftChild; + worstIndex = leftIndex; + } + int rightChild = leftChild + 1; + long rightIndex = rootIndex + rightChild; + if (rightChild < bucketSize && betterThan(values.get(worstIndex), values.get(rightIndex))) { + worst = rightChild; + worstIndex = rightIndex; + } + } + if (worst == parent) { + break; + } + swap(worstIndex, parentIndex); + parent = worst; + } + } + + @Override + public final void close() { + Releasables.close(values, heapMode); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListDoubleAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListDoubleAggregatorFunction.java new file mode 100644 index 0000000000000..d52d25941780c --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListDoubleAggregatorFunction.java @@ -0,0 +1,126 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.DoubleVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunction} implementation for {@link TopListDoubleAggregator}. + * This class is generated. Do not edit it. 
+ */ +public final class TopListDoubleAggregatorFunction implements AggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("topList", ElementType.DOUBLE) ); + + private final DriverContext driverContext; + + private final TopListDoubleAggregator.SingleState state; + + private final List channels; + + private final int limit; + + private final boolean ascending; + + public TopListDoubleAggregatorFunction(DriverContext driverContext, List channels, + TopListDoubleAggregator.SingleState state, int limit, boolean ascending) { + this.driverContext = driverContext; + this.channels = channels; + this.state = state; + this.limit = limit; + this.ascending = ascending; + } + + public static TopListDoubleAggregatorFunction create(DriverContext driverContext, + List channels, int limit, boolean ascending) { + return new TopListDoubleAggregatorFunction(driverContext, channels, TopListDoubleAggregator.initSingle(driverContext.bigArrays(), limit, ascending), limit, ascending); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public void addRawInput(Page page) { + DoubleBlock block = page.getBlock(channels.get(0)); + DoubleVector vector = block.asVector(); + if (vector != null) { + addRawVector(vector); + } else { + addRawBlock(block); + } + } + + private void addRawVector(DoubleVector vector) { + for (int i = 0; i < vector.getPositionCount(); i++) { + TopListDoubleAggregator.combine(state, vector.getDouble(i)); + } + } + + private void addRawBlock(DoubleBlock block) { + for (int p = 0; p < block.getPositionCount(); p++) { + if (block.isNull(p)) { + continue; + } + int start = block.getFirstValueIndex(p); + int end = start + block.getValueCount(p); + for (int i = start; i < end; i++) { + TopListDoubleAggregator.combine(state, block.getDouble(i)); + } + } + } + + @Override + public void addIntermediateInput(Page page) { + assert channels.size() == intermediateBlockCount(); + assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); + Block topListUncast = page.getBlock(channels.get(0)); + if (topListUncast.areAllValuesNull()) { + return; + } + DoubleBlock topList = (DoubleBlock) topListUncast; + assert topList.getPositionCount() == 1; + TopListDoubleAggregator.combineIntermediate(state, topList); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + state.toIntermediate(blocks, offset, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { + blocks[offset] = TopListDoubleAggregator.evaluateFinal(state, driverContext); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListDoubleAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListDoubleAggregatorFunctionSupplier.java new file mode 100644 index 0000000000000..48df091d339b6 --- /dev/null +++ 
b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListDoubleAggregatorFunctionSupplier.java @@ -0,0 +1,45 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.util.List; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunctionSupplier} implementation for {@link TopListDoubleAggregator}. + * This class is generated. Do not edit it. + */ +public final class TopListDoubleAggregatorFunctionSupplier implements AggregatorFunctionSupplier { + private final List channels; + + private final int limit; + + private final boolean ascending; + + public TopListDoubleAggregatorFunctionSupplier(List channels, int limit, + boolean ascending) { + this.channels = channels; + this.limit = limit; + this.ascending = ascending; + } + + @Override + public TopListDoubleAggregatorFunction aggregator(DriverContext driverContext) { + return TopListDoubleAggregatorFunction.create(driverContext, channels, limit, ascending); + } + + @Override + public TopListDoubleGroupingAggregatorFunction groupingAggregator(DriverContext driverContext) { + return TopListDoubleGroupingAggregatorFunction.create(channels, driverContext, limit, ascending); + } + + @Override + public String describe() { + return "top_list of doubles"; + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListDoubleGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListDoubleGroupingAggregatorFunction.java new file mode 100644 index 0000000000000..0e3b98bb0f7e5 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListDoubleGroupingAggregatorFunction.java @@ -0,0 +1,202 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.DoubleVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link GroupingAggregatorFunction} implementation for {@link TopListDoubleAggregator}. + * This class is generated. Do not edit it. 
+ */ +public final class TopListDoubleGroupingAggregatorFunction implements GroupingAggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("topList", ElementType.DOUBLE) ); + + private final TopListDoubleAggregator.GroupingState state; + + private final List channels; + + private final DriverContext driverContext; + + private final int limit; + + private final boolean ascending; + + public TopListDoubleGroupingAggregatorFunction(List channels, + TopListDoubleAggregator.GroupingState state, DriverContext driverContext, int limit, + boolean ascending) { + this.channels = channels; + this.state = state; + this.driverContext = driverContext; + this.limit = limit; + this.ascending = ascending; + } + + public static TopListDoubleGroupingAggregatorFunction create(List channels, + DriverContext driverContext, int limit, boolean ascending) { + return new TopListDoubleGroupingAggregatorFunction(channels, TopListDoubleAggregator.initGrouping(driverContext.bigArrays(), limit, ascending), driverContext, limit, ascending); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public GroupingAggregatorFunction.AddInput prepareProcessPage(SeenGroupIds seenGroupIds, + Page page) { + DoubleBlock valuesBlock = page.getBlock(channels.get(0)); + DoubleVector valuesVector = valuesBlock.asVector(); + if (valuesVector == null) { + if (valuesBlock.mayHaveNulls()) { + state.enableGroupIdTracking(seenGroupIds); + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock); + } + }; + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesVector); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesVector); + } + }; + } + + private void addRawInput(int positionOffset, IntVector groups, DoubleBlock values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + TopListDoubleAggregator.combine(state, groupId, values.getDouble(v)); + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, DoubleVector values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + TopListDoubleAggregator.combine(state, groupId, values.getDouble(groupPosition + positionOffset)); + } + } + + private void addRawInput(int positionOffset, IntBlock groups, DoubleBlock values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = 
groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = Math.toIntExact(groups.getInt(g)); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + TopListDoubleAggregator.combine(state, groupId, values.getDouble(v)); + } + } + } + } + + private void addRawInput(int positionOffset, IntBlock groups, DoubleVector values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = Math.toIntExact(groups.getInt(g)); + TopListDoubleAggregator.combine(state, groupId, values.getDouble(groupPosition + positionOffset)); + } + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block topListUncast = page.getBlock(channels.get(0)); + if (topListUncast.areAllValuesNull()) { + return; + } + DoubleBlock topList = (DoubleBlock) topListUncast; + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + TopListDoubleAggregator.combineIntermediate(state, groupId, topList, groupPosition + positionOffset); + } + } + + @Override + public void addIntermediateRowInput(int groupId, GroupingAggregatorFunction input, int position) { + if (input.getClass() != getClass()) { + throw new IllegalArgumentException("expected " + getClass() + "; got " + input.getClass()); + } + TopListDoubleAggregator.GroupingState inState = ((TopListDoubleGroupingAggregatorFunction) input).state; + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + TopListDoubleAggregator.combineStates(state, groupId, inState, position); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) { + state.toIntermediate(blocks, offset, selected, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, IntVector selected, + DriverContext driverContext) { + blocks[offset] = TopListDoubleAggregator.evaluateFinal(state, selected, driverContext); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListIntAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListIntAggregatorFunction.java new file mode 100644 index 0000000000000..e885b285c4a51 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListIntAggregatorFunction.java @@ -0,0 +1,126 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. 
Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunction} implementation for {@link TopListIntAggregator}. + * This class is generated. Do not edit it. + */ +public final class TopListIntAggregatorFunction implements AggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("topList", ElementType.INT) ); + + private final DriverContext driverContext; + + private final TopListIntAggregator.SingleState state; + + private final List channels; + + private final int limit; + + private final boolean ascending; + + public TopListIntAggregatorFunction(DriverContext driverContext, List channels, + TopListIntAggregator.SingleState state, int limit, boolean ascending) { + this.driverContext = driverContext; + this.channels = channels; + this.state = state; + this.limit = limit; + this.ascending = ascending; + } + + public static TopListIntAggregatorFunction create(DriverContext driverContext, + List channels, int limit, boolean ascending) { + return new TopListIntAggregatorFunction(driverContext, channels, TopListIntAggregator.initSingle(driverContext.bigArrays(), limit, ascending), limit, ascending); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public void addRawInput(Page page) { + IntBlock block = page.getBlock(channels.get(0)); + IntVector vector = block.asVector(); + if (vector != null) { + addRawVector(vector); + } else { + addRawBlock(block); + } + } + + private void addRawVector(IntVector vector) { + for (int i = 0; i < vector.getPositionCount(); i++) { + TopListIntAggregator.combine(state, vector.getInt(i)); + } + } + + private void addRawBlock(IntBlock block) { + for (int p = 0; p < block.getPositionCount(); p++) { + if (block.isNull(p)) { + continue; + } + int start = block.getFirstValueIndex(p); + int end = start + block.getValueCount(p); + for (int i = start; i < end; i++) { + TopListIntAggregator.combine(state, block.getInt(i)); + } + } + } + + @Override + public void addIntermediateInput(Page page) { + assert channels.size() == intermediateBlockCount(); + assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); + Block topListUncast = page.getBlock(channels.get(0)); + if (topListUncast.areAllValuesNull()) { + return; + } + IntBlock topList = (IntBlock) topListUncast; + assert topList.getPositionCount() == 1; + TopListIntAggregator.combineIntermediate(state, topList); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + state.toIntermediate(blocks, offset, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { + blocks[offset] = TopListIntAggregator.evaluateFinal(state, driverContext); + } + + @Override + public String toString() { + 
StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListIntAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListIntAggregatorFunctionSupplier.java new file mode 100644 index 0000000000000..d8bf91ba85541 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListIntAggregatorFunctionSupplier.java @@ -0,0 +1,45 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.util.List; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunctionSupplier} implementation for {@link TopListIntAggregator}. + * This class is generated. Do not edit it. + */ +public final class TopListIntAggregatorFunctionSupplier implements AggregatorFunctionSupplier { + private final List channels; + + private final int limit; + + private final boolean ascending; + + public TopListIntAggregatorFunctionSupplier(List channels, int limit, + boolean ascending) { + this.channels = channels; + this.limit = limit; + this.ascending = ascending; + } + + @Override + public TopListIntAggregatorFunction aggregator(DriverContext driverContext) { + return TopListIntAggregatorFunction.create(driverContext, channels, limit, ascending); + } + + @Override + public TopListIntGroupingAggregatorFunction groupingAggregator(DriverContext driverContext) { + return TopListIntGroupingAggregatorFunction.create(channels, driverContext, limit, ascending); + } + + @Override + public String describe() { + return "top_list of ints"; + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListIntGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListIntGroupingAggregatorFunction.java new file mode 100644 index 0000000000000..820ebb95e530c --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListIntGroupingAggregatorFunction.java @@ -0,0 +1,200 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link GroupingAggregatorFunction} implementation for {@link TopListIntAggregator}. + * This class is generated. Do not edit it. 
+ */ +public final class TopListIntGroupingAggregatorFunction implements GroupingAggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("topList", ElementType.INT) ); + + private final TopListIntAggregator.GroupingState state; + + private final List channels; + + private final DriverContext driverContext; + + private final int limit; + + private final boolean ascending; + + public TopListIntGroupingAggregatorFunction(List channels, + TopListIntAggregator.GroupingState state, DriverContext driverContext, int limit, + boolean ascending) { + this.channels = channels; + this.state = state; + this.driverContext = driverContext; + this.limit = limit; + this.ascending = ascending; + } + + public static TopListIntGroupingAggregatorFunction create(List channels, + DriverContext driverContext, int limit, boolean ascending) { + return new TopListIntGroupingAggregatorFunction(channels, TopListIntAggregator.initGrouping(driverContext.bigArrays(), limit, ascending), driverContext, limit, ascending); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public GroupingAggregatorFunction.AddInput prepareProcessPage(SeenGroupIds seenGroupIds, + Page page) { + IntBlock valuesBlock = page.getBlock(channels.get(0)); + IntVector valuesVector = valuesBlock.asVector(); + if (valuesVector == null) { + if (valuesBlock.mayHaveNulls()) { + state.enableGroupIdTracking(seenGroupIds); + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock); + } + }; + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesVector); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesVector); + } + }; + } + + private void addRawInput(int positionOffset, IntVector groups, IntBlock values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + TopListIntAggregator.combine(state, groupId, values.getInt(v)); + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, IntVector values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + TopListIntAggregator.combine(state, groupId, values.getInt(groupPosition + positionOffset)); + } + } + + private void addRawInput(int positionOffset, IntBlock groups, IntBlock values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for 
(int g = groupStart; g < groupEnd; g++) { + int groupId = Math.toIntExact(groups.getInt(g)); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + TopListIntAggregator.combine(state, groupId, values.getInt(v)); + } + } + } + } + + private void addRawInput(int positionOffset, IntBlock groups, IntVector values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = Math.toIntExact(groups.getInt(g)); + TopListIntAggregator.combine(state, groupId, values.getInt(groupPosition + positionOffset)); + } + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block topListUncast = page.getBlock(channels.get(0)); + if (topListUncast.areAllValuesNull()) { + return; + } + IntBlock topList = (IntBlock) topListUncast; + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + TopListIntAggregator.combineIntermediate(state, groupId, topList, groupPosition + positionOffset); + } + } + + @Override + public void addIntermediateRowInput(int groupId, GroupingAggregatorFunction input, int position) { + if (input.getClass() != getClass()) { + throw new IllegalArgumentException("expected " + getClass() + "; got " + input.getClass()); + } + TopListIntAggregator.GroupingState inState = ((TopListIntGroupingAggregatorFunction) input).state; + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + TopListIntAggregator.combineStates(state, groupId, inState, position); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) { + state.toIntermediate(blocks, offset, selected, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, IntVector selected, + DriverContext driverContext) { + blocks[offset] = TopListIntAggregator.evaluateFinal(state, selected, driverContext); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListLongAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListLongAggregatorFunction.java new file mode 100644 index 0000000000000..1a09a1a860e2f --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListLongAggregatorFunction.java @@ -0,0 +1,126 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. 
+package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunction} implementation for {@link TopListLongAggregator}. + * This class is generated. Do not edit it. + */ +public final class TopListLongAggregatorFunction implements AggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("topList", ElementType.LONG) ); + + private final DriverContext driverContext; + + private final TopListLongAggregator.SingleState state; + + private final List channels; + + private final int limit; + + private final boolean ascending; + + public TopListLongAggregatorFunction(DriverContext driverContext, List channels, + TopListLongAggregator.SingleState state, int limit, boolean ascending) { + this.driverContext = driverContext; + this.channels = channels; + this.state = state; + this.limit = limit; + this.ascending = ascending; + } + + public static TopListLongAggregatorFunction create(DriverContext driverContext, + List channels, int limit, boolean ascending) { + return new TopListLongAggregatorFunction(driverContext, channels, TopListLongAggregator.initSingle(driverContext.bigArrays(), limit, ascending), limit, ascending); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public void addRawInput(Page page) { + LongBlock block = page.getBlock(channels.get(0)); + LongVector vector = block.asVector(); + if (vector != null) { + addRawVector(vector); + } else { + addRawBlock(block); + } + } + + private void addRawVector(LongVector vector) { + for (int i = 0; i < vector.getPositionCount(); i++) { + TopListLongAggregator.combine(state, vector.getLong(i)); + } + } + + private void addRawBlock(LongBlock block) { + for (int p = 0; p < block.getPositionCount(); p++) { + if (block.isNull(p)) { + continue; + } + int start = block.getFirstValueIndex(p); + int end = start + block.getValueCount(p); + for (int i = start; i < end; i++) { + TopListLongAggregator.combine(state, block.getLong(i)); + } + } + } + + @Override + public void addIntermediateInput(Page page) { + assert channels.size() == intermediateBlockCount(); + assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); + Block topListUncast = page.getBlock(channels.get(0)); + if (topListUncast.areAllValuesNull()) { + return; + } + LongBlock topList = (LongBlock) topListUncast; + assert topList.getPositionCount() == 1; + TopListLongAggregator.combineIntermediate(state, topList); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + state.toIntermediate(blocks, offset, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { + blocks[offset] = TopListLongAggregator.evaluateFinal(state, driverContext); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + 
sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListLongAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListLongAggregatorFunctionSupplier.java new file mode 100644 index 0000000000000..617895fbff1a3 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListLongAggregatorFunctionSupplier.java @@ -0,0 +1,45 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.util.List; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunctionSupplier} implementation for {@link TopListLongAggregator}. + * This class is generated. Do not edit it. + */ +public final class TopListLongAggregatorFunctionSupplier implements AggregatorFunctionSupplier { + private final List channels; + + private final int limit; + + private final boolean ascending; + + public TopListLongAggregatorFunctionSupplier(List channels, int limit, + boolean ascending) { + this.channels = channels; + this.limit = limit; + this.ascending = ascending; + } + + @Override + public TopListLongAggregatorFunction aggregator(DriverContext driverContext) { + return TopListLongAggregatorFunction.create(driverContext, channels, limit, ascending); + } + + @Override + public TopListLongGroupingAggregatorFunction groupingAggregator(DriverContext driverContext) { + return TopListLongGroupingAggregatorFunction.create(channels, driverContext, limit, ascending); + } + + @Override + public String describe() { + return "top_list of longs"; + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListLongGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListLongGroupingAggregatorFunction.java new file mode 100644 index 0000000000000..cadb48b7d29d4 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListLongGroupingAggregatorFunction.java @@ -0,0 +1,202 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link GroupingAggregatorFunction} implementation for {@link TopListLongAggregator}. 
+ * This class is generated. Do not edit it. + */ +public final class TopListLongGroupingAggregatorFunction implements GroupingAggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("topList", ElementType.LONG) ); + + private final TopListLongAggregator.GroupingState state; + + private final List channels; + + private final DriverContext driverContext; + + private final int limit; + + private final boolean ascending; + + public TopListLongGroupingAggregatorFunction(List channels, + TopListLongAggregator.GroupingState state, DriverContext driverContext, int limit, + boolean ascending) { + this.channels = channels; + this.state = state; + this.driverContext = driverContext; + this.limit = limit; + this.ascending = ascending; + } + + public static TopListLongGroupingAggregatorFunction create(List channels, + DriverContext driverContext, int limit, boolean ascending) { + return new TopListLongGroupingAggregatorFunction(channels, TopListLongAggregator.initGrouping(driverContext.bigArrays(), limit, ascending), driverContext, limit, ascending); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public GroupingAggregatorFunction.AddInput prepareProcessPage(SeenGroupIds seenGroupIds, + Page page) { + LongBlock valuesBlock = page.getBlock(channels.get(0)); + LongVector valuesVector = valuesBlock.asVector(); + if (valuesVector == null) { + if (valuesBlock.mayHaveNulls()) { + state.enableGroupIdTracking(seenGroupIds); + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock); + } + }; + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesVector); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesVector); + } + }; + } + + private void addRawInput(int positionOffset, IntVector groups, LongBlock values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + TopListLongAggregator.combine(state, groupId, values.getLong(v)); + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, LongVector values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + TopListLongAggregator.combine(state, groupId, values.getLong(groupPosition + positionOffset)); + } + } + + private void addRawInput(int positionOffset, IntBlock groups, LongBlock values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int 
groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = Math.toIntExact(groups.getInt(g)); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + TopListLongAggregator.combine(state, groupId, values.getLong(v)); + } + } + } + } + + private void addRawInput(int positionOffset, IntBlock groups, LongVector values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = Math.toIntExact(groups.getInt(g)); + TopListLongAggregator.combine(state, groupId, values.getLong(groupPosition + positionOffset)); + } + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block topListUncast = page.getBlock(channels.get(0)); + if (topListUncast.areAllValuesNull()) { + return; + } + LongBlock topList = (LongBlock) topListUncast; + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + TopListLongAggregator.combineIntermediate(state, groupId, topList, groupPosition + positionOffset); + } + } + + @Override + public void addIntermediateRowInput(int groupId, GroupingAggregatorFunction input, int position) { + if (input.getClass() != getClass()) { + throw new IllegalArgumentException("expected " + getClass() + "; got " + input.getClass()); + } + TopListLongAggregator.GroupingState inState = ((TopListLongGroupingAggregatorFunction) input).state; + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + TopListLongAggregator.combineStates(state, groupId, inState, position); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) { + state.toIntermediate(blocks, offset, selected, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, IntVector selected, + DriverContext driverContext) { + blocks[offset] = TopListLongAggregator.evaluateFinal(state, selected, driverContext); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/module-info.java b/x-pack/plugin/esql/compute/src/main/java/module-info.java index 3772d6c83f5aa..dc8cda0fbe3c8 100644 --- a/x-pack/plugin/esql/compute/src/main/java/module-info.java +++ b/x-pack/plugin/esql/compute/src/main/java/module-info.java @@ -30,4 +30,5 @@ exports org.elasticsearch.compute.operator.topn; exports org.elasticsearch.compute.operator.mvdedupe; exports org.elasticsearch.compute.aggregation.table; + exports org.elasticsearch.compute.data.sort; } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-TopListAggregator.java.st 
b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-TopListAggregator.java.st new file mode 100644 index 0000000000000..810311154503e --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-TopListAggregator.java.st @@ -0,0 +1,142 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.compute.ann.Aggregator; +import org.elasticsearch.compute.ann.GroupingAggregator; +import org.elasticsearch.compute.ann.IntermediateState; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +$if(!long)$ +import org.elasticsearch.compute.data.$Type$Block; +$endif$ +import org.elasticsearch.compute.data.IntVector; +$if(long)$ +import org.elasticsearch.compute.data.$Type$Block; +$endif$ +import org.elasticsearch.compute.data.sort.$Type$BucketedSort; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.search.sort.SortOrder; + +/** + * Aggregates the top N field values for $type$. + */ +@Aggregator({ @IntermediateState(name = "topList", type = "$TYPE$_BLOCK") }) +@GroupingAggregator +class TopList$Type$Aggregator { + public static SingleState initSingle(BigArrays bigArrays, int limit, boolean ascending) { + return new SingleState(bigArrays, limit, ascending); + } + + public static void combine(SingleState state, $type$ v) { + state.add(v); + } + + public static void combineIntermediate(SingleState state, $Type$Block values) { + int start = values.getFirstValueIndex(0); + int end = start + values.getValueCount(0); + for (int i = start; i < end; i++) { + combine(state, values.get$Type$(i)); + } + } + + public static Block evaluateFinal(SingleState state, DriverContext driverContext) { + return state.toBlock(driverContext.blockFactory()); + } + + public static GroupingState initGrouping(BigArrays bigArrays, int limit, boolean ascending) { + return new GroupingState(bigArrays, limit, ascending); + } + + public static void combine(GroupingState state, int groupId, $type$ v) { + state.add(groupId, v); + } + + public static void combineIntermediate(GroupingState state, int groupId, $Type$Block values, int valuesPosition) { + int start = values.getFirstValueIndex(valuesPosition); + int end = start + values.getValueCount(valuesPosition); + for (int i = start; i < end; i++) { + combine(state, groupId, values.get$Type$(i)); + } + } + + public static void combineStates(GroupingState current, int groupId, GroupingState state, int statePosition) { + current.merge(groupId, state, statePosition); + } + + public static Block evaluateFinal(GroupingState state, IntVector selected, DriverContext driverContext) { + return state.toBlock(driverContext.blockFactory(), selected); + } + + public static class GroupingState implements Releasable { + private final $Type$BucketedSort sort; + + private GroupingState(BigArrays bigArrays, int limit, boolean ascending) { + this.sort = new $Type$BucketedSort(bigArrays, ascending ? 
SortOrder.ASC : SortOrder.DESC, limit); + } + + public void add(int groupId, $type$ value) { + sort.collect(value, groupId); + } + + public void merge(int groupId, GroupingState other, int otherGroupId) { + sort.merge(groupId, other.sort, otherGroupId); + } + + void toIntermediate(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) { + blocks[offset] = toBlock(driverContext.blockFactory(), selected); + } + + Block toBlock(BlockFactory blockFactory, IntVector selected) { + return sort.toBlock(blockFactory, selected); + } + + void enableGroupIdTracking(SeenGroupIds seen) { + // we figure out seen values from nulls on the values block + } + + @Override + public void close() { + Releasables.closeExpectNoException(sort); + } + } + + public static class SingleState implements Releasable { + private final GroupingState internalState; + + private SingleState(BigArrays bigArrays, int limit, boolean ascending) { + this.internalState = new GroupingState(bigArrays, limit, ascending); + } + + public void add($type$ value) { + internalState.add(0, value); + } + + public void merge(GroupingState other) { + internalState.merge(0, other, 0); + } + + void toIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + blocks[offset] = toBlock(driverContext.blockFactory()); + } + + Block toBlock(BlockFactory blockFactory) { + try (var intValues = blockFactory.newConstantIntVector(0, 1)) { + return internalState.toBlock(blockFactory, intValues); + } + } + + @Override + public void close() { + Releasables.closeExpectNoException(internalState); + } + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/sort/X-BucketedSort.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/sort/X-BucketedSort.java.st new file mode 100644 index 0000000000000..6587743e34b6f --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/sort/X-BucketedSort.java.st @@ -0,0 +1,350 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.data.sort; + +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.BitArray; +import org.elasticsearch.common.util.$Type$Array; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.core.Tuple; +import org.elasticsearch.search.sort.BucketedSort; +import org.elasticsearch.search.sort.SortOrder; + +import java.util.Arrays; +import java.util.stream.IntStream; + +/** + * Aggregates the top N $type$ values per bucket. + * See {@link BucketedSort} for more information. + * This class is generated. Edit @{code X-BucketedSort.java.st} instead of this file. + */ +public class $Type$BucketedSort implements Releasable { + + private final BigArrays bigArrays; + private final SortOrder order; + private final int bucketSize; + /** + * {@code true} if the bucket is in heap mode, {@code false} if + * it is still gathering. + */ + private final BitArray heapMode; + /** + * An array containing all the values on all buckets. The structure is as follows: + *
+ * <p>
+ *     For each bucket, there are bucketSize elements, based on the bucket id (0, 1, 2...).
+ *     Then, for each bucket, it can be in 2 states:
+ * </p>
+ * <ul>
+ *     <li>
+ *         Gather mode: All buckets start in gather mode, and remain here while they have less than bucketSize elements.
+ *         In gather mode, the elements are stored in the array from the highest index to the lowest index.
+ *         The lowest index contains the offset to the next slot to be filled.
+ *         <p>
+ *             This allows us to insert elements in O(1) time.
+ *         </p>
+ *         <p>
+ *             When the bucketSize-th element is collected, the bucket transitions to heap mode, by heapifying its contents.
+ *         </p>
+ *     </li>
+ *     <li>
+ *         Heap mode: The bucket slots are organized as a min heap structure.
+ *         <p>
+ *             The root of the heap is the minimum value in the bucket,
+ *             which allows us to quickly discard new values that are not in the top N.
+ *         </p>
+ *     </li>
+ * </ul>
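+ * <p>
+ *     An illustrative sketch (assuming {@code bucketSize = 4}) of a bucket rooted at
+ *     {@code root} that is still in gather mode after collecting two values:
+ * </p>
+ * <pre>
+ *     root+0    root+1    root+2    root+3
+ *     [  1  ]   [  --  ]  [  v2  ]  [  v1  ]
+ * </pre>
+ * <p>
+ *     Slot {@code root+0} holds the next gather offset, so the third value lands at
+ *     {@code root+1}; once a value is finally written into {@code root+0} the bucket
+ *     is heapified and flips to heap mode.
+ * </p>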
    + */ + private $Type$Array values; + + public $Type$BucketedSort(BigArrays bigArrays, SortOrder order, int bucketSize) { + this.bigArrays = bigArrays; + this.order = order; + this.bucketSize = bucketSize; + heapMode = new BitArray(0, bigArrays); + + boolean success = false; + try { + values = bigArrays.new$Type$Array(0, false); + success = true; + } finally { + if (success == false) { + close(); + } + } + } + + /** + * Collects a {@code value} into a {@code bucket}. + *
+ * <p>
+ *     It may or may not be inserted in the heap, depending on if it is better than the current root.
+ * </p>
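+ * <p>
+ *     A worked example (assuming {@code SortOrder.DESC} and {@code bucketSize = 3}):
+ *     a heapified bucket holding {5, 2, 9} keeps its minimum, 2, at the root.
+ *     Collecting 7 beats the root, so it replaces it and is sifted down via
+ *     {@code downHeap}; collecting 1 is discarded without touching the heap.
+ * </p>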
    + */ + public void collect($type$ value, int bucket) { + long rootIndex = (long) bucket * bucketSize; + if (inHeapMode(bucket)) { + if (betterThan(value, values.get(rootIndex))) { + values.set(rootIndex, value); + downHeap(rootIndex, 0); + } + return; + } + // Gathering mode + long requiredSize = rootIndex + bucketSize; + if (values.size() < requiredSize) { + grow(requiredSize); + } + int next = getNextGatherOffset(rootIndex); + assert 0 <= next && next < bucketSize + : "Expected next to be in the range of valid buckets [0 <= " + next + " < " + bucketSize + "]"; + long index = next + rootIndex; + values.set(index, value); + if (next == 0) { + heapMode.set(bucket); + heapify(rootIndex); + } else { + setNextGatherOffset(rootIndex, next - 1); + } + } + + /** + * The order of the sort. + */ + public SortOrder getOrder() { + return order; + } + + /** + * The number of values to store per bucket. + */ + public int getBucketSize() { + return bucketSize; + } + + /** + * Get the first and last indexes (inclusive, exclusive) of the values for a bucket. + * Returns [0, 0] if the bucket has never been collected. + */ + private Tuple getBucketValuesIndexes(int bucket) { + long rootIndex = (long) bucket * bucketSize; + if (rootIndex >= values.size()) { + // We've never seen this bucket. + return Tuple.tuple(0L, 0L); + } + long start = inHeapMode(bucket) ? rootIndex : (rootIndex + getNextGatherOffset(rootIndex) + 1); + long end = rootIndex + bucketSize; + return Tuple.tuple(start, end); + } + + /** + * Merge the values from {@code other}'s {@code otherGroupId} into {@code groupId}. + */ + public void merge(int groupId, $Type$BucketedSort other, int otherGroupId) { + var otherBounds = other.getBucketValuesIndexes(otherGroupId); + + // TODO: This can be improved for heapified buckets by making use of the heap structures + for (long i = otherBounds.v1(); i < otherBounds.v2(); i++) { + collect(other.values.get(i), groupId); + } + } + + /** + * Creates a block with the values from the {@code selected} groups. + */ + public Block toBlock(BlockFactory blockFactory, IntVector selected) { + // Check if the selected groups are all empty, to avoid allocating extra memory + if (IntStream.range(0, selected.getPositionCount()).map(selected::getInt).noneMatch(bucket -> { + var bounds = this.getBucketValuesIndexes(bucket); + var size = bounds.v2() - bounds.v1(); + + return size > 0; + })) { + return blockFactory.newConstantNullBlock(selected.getPositionCount()); + } + + // Used to sort the values in the bucket. 
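+ // The scratch array is allocated once and reused for every selected bucket;
+ // only the first `size` slots are meaningful for any given bucket.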
+ var bucketValues = new $type$[bucketSize]; + + try (var builder = blockFactory.new$Type$BlockBuilder(selected.getPositionCount())) { + for (int s = 0; s < selected.getPositionCount(); s++) { + int bucket = selected.getInt(s); + + var bounds = getBucketValuesIndexes(bucket); + var size = bounds.v2() - bounds.v1(); + + if (size == 0) { + builder.appendNull(); + continue; + } + + if (size == 1) { + builder.append$Type$(values.get(bounds.v1())); + continue; + } + + for (int i = 0; i < size; i++) { + bucketValues[i] = values.get(bounds.v1() + i); + } + + // TODO: Make use of heap structures to faster iterate in order instead of copying and sorting + Arrays.sort(bucketValues, 0, (int) size); + + builder.beginPositionEntry(); + if (order == SortOrder.ASC) { + for (int i = 0; i < size; i++) { + builder.append$Type$(bucketValues[i]); + } + } else { + for (int i = (int) size - 1; i >= 0; i--) { + builder.append$Type$(bucketValues[i]); + } + } + builder.endPositionEntry(); + } + return builder.build(); + } + } + + /** + * Is this bucket a min heap {@code true} or in gathering mode {@code false}? + */ + private boolean inHeapMode(int bucket) { + return heapMode.get(bucket); + } + + /** + * Get the next index that should be "gathered" for a bucket rooted + * at {@code rootIndex}. + */ + private int getNextGatherOffset(long rootIndex) { +$if(int)$ + return values.get(rootIndex); +$else$ + return (int) values.get(rootIndex); +$endif$ + } + + /** + * Set the next index that should be "gathered" for a bucket rooted + * at {@code rootIndex}. + */ + private void setNextGatherOffset(long rootIndex, int offset) { + values.set(rootIndex, offset); + } + + /** + * {@code true} if the entry at index {@code lhs} is "better" than + * the entry at {@code rhs}. "Better" in this means "lower" for + * {@link SortOrder#ASC} and "higher" for {@link SortOrder#DESC}. + */ + private boolean betterThan($type$ lhs, $type$ rhs) { + return getOrder().reverseMul() * $Wrapper$.compare(lhs, rhs) < 0; + } + + /** + * Swap the data at two indices. + */ + private void swap(long lhs, long rhs) { + var tmp = values.get(lhs); + values.set(lhs, values.get(rhs)); + values.set(rhs, tmp); + } + + /** + * Allocate storage for more buckets and store the "next gather offset" + * for those new buckets. + */ + private void grow(long minSize) { + long oldMax = values.size(); + values = bigArrays.grow(values, minSize); + // Set the next gather offsets for all newly allocated buckets. + setNextGatherOffsets(oldMax - (oldMax % getBucketSize())); + } + + /** + * Maintain the "next gather offsets" for newly allocated buckets. + */ + private void setNextGatherOffsets(long startingAt) { + int nextOffset = getBucketSize() - 1; + for (long bucketRoot = startingAt; bucketRoot < values.size(); bucketRoot += getBucketSize()) { + setNextGatherOffset(bucketRoot, nextOffset); + } + } + + /** + * Heapify a bucket whose entries are in random order. + *
+ * <p>
+ * This works by validating the heap property on each node, iterating
+ * "upwards", pushing any out of order parents "down". Check out the
+ * <a href="https://en.wikipedia.org/wiki/Binary_heap#Building_a_heap">wikipedia</a>
+ * entry on binary heaps for more about this.
+ * </p>
+ * <p>
+ * While this *looks* like it could easily be {@code O(n * log n)}, it is
+ * a fairly well studied algorithm attributed to Floyd. There's
+ * been a bunch of work that puts this at {@code O(n)}, close to 1.88n worst
+ * case.
+ * </p>
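+ * <p>
+ *     For intuition: with {@code bucketSize = 7} the loop below starts at parent
+ *     {@code 7 / 2 - 1 = 2}, the last node that has a child, so the roughly half
+ *     of the nodes that are leaves are never used as a starting parent, and most
+ *     of the remaining nodes sit near the bottom and move very little. That is
+ *     where the linear bound comes from.
+ * </p>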
    + * + * @param rootIndex the index the start of the bucket + */ + private void heapify(long rootIndex) { + int maxParent = bucketSize / 2 - 1; + for (int parent = maxParent; parent >= 0; parent--) { + downHeap(rootIndex, parent); + } + } + + /** + * Correct the heap invariant of a parent and its children. This + * runs in {@code O(log n)} time. + * @param rootIndex index of the start of the bucket + * @param parent Index within the bucket of the parent to check. + * For example, 0 is the "root". + */ + private void downHeap(long rootIndex, int parent) { + while (true) { + long parentIndex = rootIndex + parent; + int worst = parent; + long worstIndex = parentIndex; + int leftChild = parent * 2 + 1; + long leftIndex = rootIndex + leftChild; + if (leftChild < bucketSize) { + if (betterThan(values.get(worstIndex), values.get(leftIndex))) { + worst = leftChild; + worstIndex = leftIndex; + } + int rightChild = leftChild + 1; + long rightIndex = rootIndex + rightChild; + if (rightChild < bucketSize && betterThan(values.get(worstIndex), values.get(rightIndex))) { + worst = rightChild; + worstIndex = rightIndex; + } + } + if (worst == parent) { + break; + } + swap(worstIndex, parentIndex); + parent = worst; + } + } + + @Override + public final void close() { + Releasables.close(values, heapMode); + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/TopListDoubleAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/TopListDoubleAggregatorFunctionTests.java new file mode 100644 index 0000000000000..f708038776032 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/TopListDoubleAggregatorFunctionTests.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.BlockUtils; +import org.elasticsearch.compute.operator.SequenceDoubleBlockSourceOperator; +import org.elasticsearch.compute.operator.SourceOperator; + +import java.util.List; +import java.util.stream.IntStream; + +import static org.hamcrest.Matchers.contains; + +public class TopListDoubleAggregatorFunctionTests extends AggregatorFunctionTestCase { + private static final int LIMIT = 100; + + @Override + protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { + return new SequenceDoubleBlockSourceOperator(blockFactory, IntStream.range(0, size).mapToDouble(l -> randomDouble())); + } + + @Override + protected AggregatorFunctionSupplier aggregatorFunction(List inputChannels) { + return new TopListDoubleAggregatorFunctionSupplier(inputChannels, LIMIT, true); + } + + @Override + protected String expectedDescriptionOfAggregator() { + return "top_list of doubles"; + } + + @Override + public void assertSimpleOutput(List input, Block result) { + Object[] values = input.stream().flatMapToDouble(b -> allDoubles(b)).sorted().limit(LIMIT).boxed().toArray(Object[]::new); + assertThat((List) BlockUtils.toJavaObject(result, 0), contains(values)); + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/TopListIntAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/TopListIntAggregatorFunctionTests.java new file mode 100644 index 0000000000000..443604efd5c15 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/TopListIntAggregatorFunctionTests.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.BlockUtils; +import org.elasticsearch.compute.operator.SequenceIntBlockSourceOperator; +import org.elasticsearch.compute.operator.SourceOperator; + +import java.util.List; +import java.util.stream.IntStream; + +import static org.hamcrest.Matchers.contains; + +public class TopListIntAggregatorFunctionTests extends AggregatorFunctionTestCase { + private static final int LIMIT = 100; + + @Override + protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { + return new SequenceIntBlockSourceOperator(blockFactory, IntStream.range(0, size).map(l -> randomInt())); + } + + @Override + protected AggregatorFunctionSupplier aggregatorFunction(List inputChannels) { + return new TopListIntAggregatorFunctionSupplier(inputChannels, LIMIT, true); + } + + @Override + protected String expectedDescriptionOfAggregator() { + return "top_list of ints"; + } + + @Override + public void assertSimpleOutput(List input, Block result) { + Object[] values = input.stream().flatMapToInt(b -> allInts(b)).sorted().limit(LIMIT).boxed().toArray(Object[]::new); + assertThat((List) BlockUtils.toJavaObject(result, 0), contains(values)); + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/TopListLongAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/TopListLongAggregatorFunctionTests.java new file mode 100644 index 0000000000000..4a6f101e573b8 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/TopListLongAggregatorFunctionTests.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.BlockUtils; +import org.elasticsearch.compute.operator.SequenceLongBlockSourceOperator; +import org.elasticsearch.compute.operator.SourceOperator; + +import java.util.List; +import java.util.stream.LongStream; + +import static org.hamcrest.Matchers.contains; + +public class TopListLongAggregatorFunctionTests extends AggregatorFunctionTestCase { + private static final int LIMIT = 100; + + @Override + protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { + return new SequenceLongBlockSourceOperator(blockFactory, LongStream.range(0, size).map(l -> randomLong())); + } + + @Override + protected AggregatorFunctionSupplier aggregatorFunction(List inputChannels) { + return new TopListLongAggregatorFunctionSupplier(inputChannels, LIMIT, true); + } + + @Override + protected String expectedDescriptionOfAggregator() { + return "top_list of longs"; + } + + @Override + public void assertSimpleOutput(List input, Block result) { + Object[] values = input.stream().flatMapToLong(b -> allLongs(b)).sorted().limit(LIMIT).boxed().toArray(Object[]::new); + assertThat((List) BlockUtils.toJavaObject(result, 0), contains(values)); + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/sort/BucketedSortTestCase.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/sort/BucketedSortTestCase.java new file mode 100644 index 0000000000000..9e1bc145ad4ca --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/sort/BucketedSortTestCase.java @@ -0,0 +1,368 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.data.sort; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.BitArray; +import org.elasticsearch.common.util.MockBigArrays; +import org.elasticsearch.common.util.MockPageCacheRecycler; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.TestBlockFactory; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.test.ESTestCase; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; + +public abstract class BucketedSortTestCase extends ESTestCase { + /** + * Build a {@link T} to test. Sorts built by this method shouldn't need scores. + */ + protected abstract T build(SortOrder sortOrder, int bucketSize); + + /** + * Build the expected correctly typed value for a value. + */ + protected abstract Object expectedValue(double v); + + /** + * A random value for testing, with the appropriate precision for the type we're testing. + */ + protected abstract double randomValue(); + + /** + * Collect a value into the sort. 
+ * @param value value to collect, always sent as double just to have + * a number to test. Subclasses should cast to their favorite types + */ + protected abstract void collect(T sort, double value, int bucket); + + protected abstract void merge(T sort, int groupId, T other, int otherGroupId); + + protected abstract Block toBlock(T sort, BlockFactory blockFactory, IntVector selected); + + protected abstract void assertBlockTypeAndValues(Block block, Object... values); + + public final void testNeverCalled() { + SortOrder order = randomFrom(SortOrder.values()); + try (T sort = build(order, 1)) { + assertBlock(sort, randomNonNegativeInt()); + } + } + + public final void testSingleDoc() { + try (T sort = build(randomFrom(SortOrder.values()), 1)) { + collect(sort, 1, 0); + + assertBlock(sort, 0, expectedValue(1)); + } + } + + public final void testNonCompetitive() { + try (T sort = build(SortOrder.DESC, 1)) { + collect(sort, 2, 0); + collect(sort, 1, 0); + + assertBlock(sort, 0, expectedValue(2)); + } + } + + public final void testCompetitive() { + try (T sort = build(SortOrder.DESC, 1)) { + collect(sort, 1, 0); + collect(sort, 2, 0); + + assertBlock(sort, 0, expectedValue(2)); + } + } + + public final void testNegativeValue() { + try (T sort = build(SortOrder.DESC, 1)) { + collect(sort, -1, 0); + assertBlock(sort, 0, expectedValue(-1)); + } + } + + public final void testSomeBuckets() { + try (T sort = build(SortOrder.DESC, 1)) { + collect(sort, 2, 0); + collect(sort, 2, 1); + collect(sort, 2, 2); + collect(sort, 3, 0); + + assertBlock(sort, 0, expectedValue(3)); + assertBlock(sort, 1, expectedValue(2)); + assertBlock(sort, 2, expectedValue(2)); + assertBlock(sort, 3); + } + } + + public final void testBucketGaps() { + try (T sort = build(SortOrder.DESC, 1)) { + collect(sort, 2, 0); + collect(sort, 2, 2); + + assertBlock(sort, 0, expectedValue(2)); + assertBlock(sort, 1); + assertBlock(sort, 2, expectedValue(2)); + assertBlock(sort, 3); + } + } + + public final void testBucketsOutOfOrder() { + try (T sort = build(SortOrder.DESC, 1)) { + collect(sort, 2, 1); + collect(sort, 2, 0); + + assertBlock(sort, 0, expectedValue(2.0)); + assertBlock(sort, 1, expectedValue(2.0)); + assertBlock(sort, 2); + } + } + + public final void testManyBuckets() { + // Collect the buckets in random order + int[] buckets = new int[10000]; + for (int b = 0; b < buckets.length; b++) { + buckets[b] = b; + } + Collections.shuffle(Arrays.asList(buckets), random()); + + double[] maxes = new double[buckets.length]; + + try (T sort = build(SortOrder.DESC, 1)) { + for (int b : buckets) { + maxes[b] = 2; + collect(sort, 2, b); + if (randomBoolean()) { + maxes[b] = 3; + collect(sort, 3, b); + } + if (randomBoolean()) { + collect(sort, -1, b); + } + } + for (int b = 0; b < buckets.length; b++) { + assertBlock(sort, b, expectedValue(maxes[b])); + } + assertBlock(sort, buckets.length); + } + } + + public final void testTwoHitsDesc() { + try (T sort = build(SortOrder.DESC, 2)) { + collect(sort, 1, 0); + collect(sort, 2, 0); + collect(sort, 3, 0); + + assertBlock(sort, 0, expectedValue(3), expectedValue(2)); + } + } + + public final void testTwoHitsAsc() { + try (T sort = build(SortOrder.ASC, 2)) { + collect(sort, 1, 0); + collect(sort, 2, 0); + collect(sort, 3, 0); + + assertBlock(sort, 0, expectedValue(1), expectedValue(2)); + } + } + + public final void testTwoHitsTwoBucket() { + try (T sort = build(SortOrder.DESC, 2)) { + collect(sort, 1, 0); + collect(sort, 1, 1); + collect(sort, 2, 0); + collect(sort, 2, 1); + collect(sort, 3, 
0); + collect(sort, 3, 1); + collect(sort, 4, 1); + + assertBlock(sort, 0, expectedValue(3), expectedValue(2)); + assertBlock(sort, 1, expectedValue(4), expectedValue(3)); + } + } + + public final void testManyBucketsManyHits() { + // Set the values in random order + double[] values = new double[10000]; + for (int v = 0; v < values.length; v++) { + values[v] = randomValue(); + } + Collections.shuffle(Arrays.asList(values), random()); + + int buckets = between(2, 100); + int bucketSize = between(2, 100); + try (T sort = build(SortOrder.DESC, bucketSize)) { + BitArray[] bucketUsed = new BitArray[buckets]; + Arrays.setAll(bucketUsed, i -> new BitArray(values.length, bigArrays())); + for (int doc = 0; doc < values.length; doc++) { + for (int bucket = 0; bucket < buckets; bucket++) { + if (randomBoolean()) { + bucketUsed[bucket].set(doc); + collect(sort, values[doc], bucket); + } + } + } + for (int bucket = 0; bucket < buckets; bucket++) { + List bucketValues = new ArrayList<>(values.length); + for (int doc = 0; doc < values.length; doc++) { + if (bucketUsed[bucket].get(doc)) { + bucketValues.add(values[doc]); + } + } + bucketUsed[bucket].close(); + assertBlock( + sort, + bucket, + bucketValues.stream().sorted((lhs, rhs) -> rhs.compareTo(lhs)).limit(bucketSize).map(this::expectedValue).toArray() + ); + } + assertBlock(sort, buckets); + } + } + + public final void testMergeHeapToHeap() { + try (T sort = build(SortOrder.ASC, 3)) { + collect(sort, 1, 0); + collect(sort, 2, 0); + collect(sort, 3, 0); + + try (T other = build(SortOrder.ASC, 3)) { + collect(other, 1, 0); + collect(other, 2, 0); + collect(other, 3, 0); + + merge(sort, 0, other, 0); + } + + assertBlock(sort, 0, expectedValue(1), expectedValue(1), expectedValue(2)); + } + } + + public final void testMergeNoHeapToNoHeap() { + try (T sort = build(SortOrder.ASC, 3)) { + collect(sort, 1, 0); + collect(sort, 2, 0); + + try (T other = build(SortOrder.ASC, 3)) { + collect(other, 1, 0); + collect(other, 2, 0); + + merge(sort, 0, other, 0); + } + + assertBlock(sort, 0, expectedValue(1), expectedValue(1), expectedValue(2)); + } + } + + public final void testMergeHeapToNoHeap() { + try (T sort = build(SortOrder.ASC, 3)) { + collect(sort, 1, 0); + collect(sort, 2, 0); + + try (T other = build(SortOrder.ASC, 3)) { + collect(other, 1, 0); + collect(other, 2, 0); + collect(other, 3, 0); + + merge(sort, 0, other, 0); + } + + assertBlock(sort, 0, expectedValue(1), expectedValue(1), expectedValue(2)); + } + } + + public final void testMergeNoHeapToHeap() { + try (T sort = build(SortOrder.ASC, 3)) { + collect(sort, 1, 0); + collect(sort, 2, 0); + collect(sort, 3, 0); + + try (T other = build(SortOrder.ASC, 3)) { + collect(sort, 1, 0); + collect(sort, 2, 0); + + merge(sort, 0, other, 0); + } + + assertBlock(sort, 0, expectedValue(1), expectedValue(1), expectedValue(2)); + } + } + + public final void testMergeHeapToEmpty() { + try (T sort = build(SortOrder.ASC, 3)) { + try (T other = build(SortOrder.ASC, 3)) { + collect(other, 1, 0); + collect(other, 2, 0); + collect(other, 3, 0); + + merge(sort, 0, other, 0); + } + + assertBlock(sort, 0, expectedValue(1), expectedValue(2), expectedValue(3)); + } + } + + public final void testMergeEmptyToHeap() { + try (T sort = build(SortOrder.ASC, 3)) { + collect(sort, 1, 0); + collect(sort, 2, 0); + collect(sort, 3, 0); + + try (T other = build(SortOrder.ASC, 3)) { + merge(sort, 0, other, 0); + } + + assertBlock(sort, 0, expectedValue(1), expectedValue(2), expectedValue(3)); + } + } + + public final void 
testMergeEmptyToEmpty() { + try (T sort = build(SortOrder.ASC, 3)) { + try (T other = build(SortOrder.ASC, 3)) { + merge(sort, 0, other, randomNonNegativeInt()); + } + + assertBlock(sort, 0); + } + } + + private void assertBlock(T sort, int groupId, Object... values) { + var blockFactory = TestBlockFactory.getNonBreakingInstance(); + + try (var intVector = blockFactory.newConstantIntVector(groupId, 1)) { + var block = toBlock(sort, blockFactory, intVector); + + assertThat(block.getPositionCount(), equalTo(1)); + assertThat(block.getTotalValueCount(), equalTo(values.length)); + + if (values.length == 0) { + assertThat(block.elementType(), equalTo(ElementType.NULL)); + assertThat(block.isNull(0), equalTo(true)); + } else { + assertBlockTypeAndValues(block, values); + } + } + } + + protected final BigArrays bigArrays() { + return new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/sort/DoubleBucketedSortTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/sort/DoubleBucketedSortTests.java new file mode 100644 index 0000000000000..43b5caa092b9a --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/sort/DoubleBucketedSortTests.java @@ -0,0 +1,58 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.data.sort; + +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.search.sort.SortOrder; + +import static org.hamcrest.Matchers.equalTo; + +public class DoubleBucketedSortTests extends BucketedSortTestCase { + @Override + protected DoubleBucketedSort build(SortOrder sortOrder, int bucketSize) { + return new DoubleBucketedSort(bigArrays(), sortOrder, bucketSize); + } + + @Override + protected Object expectedValue(double v) { + return v; + } + + @Override + protected double randomValue() { + return randomDoubleBetween(Double.MIN_VALUE, Double.MAX_VALUE, true); + } + + @Override + protected void collect(DoubleBucketedSort sort, double value, int bucket) { + sort.collect(value, bucket); + } + + @Override + protected void merge(DoubleBucketedSort sort, int groupId, DoubleBucketedSort other, int otherGroupId) { + sort.merge(groupId, other, otherGroupId); + } + + @Override + protected Block toBlock(DoubleBucketedSort sort, BlockFactory blockFactory, IntVector selected) { + return sort.toBlock(blockFactory, selected); + } + + @Override + protected void assertBlockTypeAndValues(Block block, Object... 
values) { + assertThat(block.elementType(), equalTo(ElementType.DOUBLE)); + var typedBlock = (DoubleBlock) block; + for (int i = 0; i < values.length; i++) { + assertThat(typedBlock.getDouble(i), equalTo(values[i])); + } + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/sort/IntBucketedSortTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/sort/IntBucketedSortTests.java new file mode 100644 index 0000000000000..70d0a79ea7473 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/sort/IntBucketedSortTests.java @@ -0,0 +1,58 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.data.sort; + +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.search.sort.SortOrder; + +import static org.hamcrest.Matchers.equalTo; + +public class IntBucketedSortTests extends BucketedSortTestCase { + @Override + protected IntBucketedSort build(SortOrder sortOrder, int bucketSize) { + return new IntBucketedSort(bigArrays(), sortOrder, bucketSize); + } + + @Override + protected Object expectedValue(double v) { + return (int) v; + } + + @Override + protected double randomValue() { + return randomInt(); + } + + @Override + protected void collect(IntBucketedSort sort, double value, int bucket) { + sort.collect((int) value, bucket); + } + + @Override + protected void merge(IntBucketedSort sort, int groupId, IntBucketedSort other, int otherGroupId) { + sort.merge(groupId, other, otherGroupId); + } + + @Override + protected Block toBlock(IntBucketedSort sort, BlockFactory blockFactory, IntVector selected) { + return sort.toBlock(blockFactory, selected); + } + + @Override + protected void assertBlockTypeAndValues(Block block, Object... values) { + assertThat(block.elementType(), equalTo(ElementType.INT)); + var typedBlock = (IntBlock) block; + for (int i = 0; i < values.length; i++) { + assertThat(typedBlock.getInt(i), equalTo(values[i])); + } + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/sort/LongBucketedSortTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/sort/LongBucketedSortTests.java new file mode 100644 index 0000000000000..bceed3b1d95b5 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/sort/LongBucketedSortTests.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.data.sort; + +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.search.sort.SortOrder; + +import static org.hamcrest.Matchers.equalTo; + +public class LongBucketedSortTests extends BucketedSortTestCase<LongBucketedSort> { + @Override + protected LongBucketedSort build(SortOrder sortOrder, int bucketSize) { + return new LongBucketedSort(bigArrays(), sortOrder, bucketSize); + } + + @Override + protected Object expectedValue(double v) { + return (long) v; + } + + @Override + protected double randomValue() { + // 2^50 fits in the mantissa of a double, which the test sort of needs. + return randomLongBetween(-(1L << 50), 1L << 50); + } + + @Override + protected void collect(LongBucketedSort sort, double value, int bucket) { + sort.collect((long) value, bucket); + } + + @Override + protected void merge(LongBucketedSort sort, int groupId, LongBucketedSort other, int otherGroupId) { + sort.merge(groupId, other, otherGroupId); + } + + @Override + protected Block toBlock(LongBucketedSort sort, BlockFactory blockFactory, IntVector selected) { + return sort.toBlock(blockFactory, selected); + } + + @Override + protected void assertBlockTypeAndValues(Block block, Object... values) { + assertThat(block.elementType(), equalTo(ElementType.LONG)); + var typedBlock = (LongBlock) block; + for (int i = 0; i < values.length; i++) { + assertThat(typedBlock.getLong(i), equalTo(values[i])); + } + } +} diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/meta.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/meta.csv-spec index 2cdd5c1dfd931..0fb35b4253d6d 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/meta.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/meta.csv-spec @@ -38,10 +38,10 @@ double e() "double log(?base:integer|unsigned_long|long|double, number:integer|unsigned_long|long|double)" "double log10(number:double|integer|long|unsigned_long)" "keyword|text ltrim(string:keyword|text)" -"double|integer|long max(number:double|integer|long)" +"double|integer|long|date max(number:double|integer|long|date)" "double|integer|long median(number:double|integer|long)" "double|integer|long median_absolute_deviation(number:double|integer|long)" -"double|integer|long min(number:double|integer|long)" +"double|integer|long|date min(number:double|integer|long|date)" "boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version mv_append(field1:boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version, field2:boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version)" "double mv_avg(number:double|integer|long|unsigned_long)" "keyword mv_concat(string:text|keyword, delim:text|keyword)" @@ -109,6 +109,7 @@ double tau() "keyword|text to_upper(str:keyword|text)" "version to_ver(field:keyword|text|version)" "version to_version(field:keyword|text|version)" +"double|integer|long|date top_list(field:double|integer|long|date, limit:integer, order:keyword)" "keyword|text trim(string:keyword|text)" "boolean|date|double|integer|ip|keyword|long|text|version values(field:boolean|date|double|integer|ip|keyword|long|text|version)" ; @@ -155,10 +156,10 @@ locate |[string, substring, 
start] |["keyword|text", "keyword|te log |[base, number] |["integer|unsigned_long|long|double", "integer|unsigned_long|long|double"] |["Base of logarithm. If `null`\, the function returns `null`. If not provided\, this function returns the natural logarithm (base e) of a value.", "Numeric expression. If `null`\, the function returns `null`."] log10 |number |"double|integer|long|unsigned_long" |Numeric expression. If `null`, the function returns `null`. ltrim |string |"keyword|text" |String expression. If `null`, the function returns `null`. -max |number |"double|integer|long" |[""] +max |number |"double|integer|long|date" |[""] median |number |"double|integer|long" |[""] median_absolut|number |"double|integer|long" |[""] -min |number |"double|integer|long" |[""] +min |number |"double|integer|long|date" |[""] mv_append |[field1, field2] |["boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version", "boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version"] | ["", ""] mv_avg |number |"double|integer|long|unsigned_long" |Multivalue expression. mv_concat |[string, delim] |["text|keyword", "text|keyword"] |[Multivalue expression., Delimiter.] @@ -226,6 +227,7 @@ to_unsigned_lo|field |"boolean|date|keyword|text|d to_upper |str |"keyword|text" |String expression. If `null`, the function returns `null`. to_ver |field |"keyword|text|version" |Input value. The input can be a single- or multi-valued column or an expression. to_version |field |"keyword|text|version" |Input value. The input can be a single- or multi-valued column or an expression. +top_list |[field, limit, order] |["double|integer|long|date", integer, keyword] |[The field to collect the top values for.,The maximum number of values to collect.,The order to calculate the top values. Either `asc` or `desc`.] trim |string |"keyword|text" |String expression. If `null`, the function returns `null`. values |field |"boolean|date|double|integer|ip|keyword|long|text|version" |[""] ; @@ -344,6 +346,7 @@ to_unsigned_lo|Converts an input value to an unsigned long value. If the input p to_upper |Returns a new string representing the input string converted to upper case. to_ver |Converts an input string to a version value. to_version |Converts an input string to a version value. +top_list |Collects the top values for a field. Includes repeated values. trim |Removes leading and trailing whitespaces from a string. values |Collect values for a field. 
; @@ -392,10 +395,10 @@ locate |integer log |double |[true, false] |false |false log10 |double |false |false |false ltrim |"keyword|text" |false |false |false -max |"double|integer|long" |false |false |true +max |"double|integer|long|date" |false |false |true median |"double|integer|long" |false |false |true median_absolut|"double|integer|long" |false |false |true -min |"double|integer|long" |false |false |true +min |"double|integer|long|date" |false |false |true mv_append |"boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version" |[false, false] |false |false mv_avg |double |false |false |false mv_concat |keyword |[false, false] |false |false @@ -463,6 +466,7 @@ to_unsigned_lo|unsigned_long to_upper |"keyword|text" |false |false |false to_ver |version |false |false |false to_version |version |false |false |false +top_list |"double|integer|long|date" |[false, false, false] |false |true trim |"keyword|text" |false |false |false values |"boolean|date|double|integer|ip|keyword|long|text|version" |false |false |true ; @@ -483,5 +487,5 @@ countFunctions#[skip:-8.14.99, reason:BIN added] meta functions | stats a = count(*), b = count(*), c = count(*) | mv_expand c; a:long | b:long | c:long -109 | 109 | 109 +110 | 110 | 110 ; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_top_list.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_top_list.csv-spec new file mode 100644 index 0000000000000..c24f6a7e70954 --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_top_list.csv-spec @@ -0,0 +1,156 @@ +topList +required_capability: agg_top_list +// tag::top-list[] +FROM employees +| STATS top_salaries = TOP_LIST(salary, 3, "desc"), top_salary = MAX(salary) +// end::top-list[] +; + +// tag::top-list-result[] +top_salaries:integer | top_salary:integer +[74999, 74970, 74572] | 74999 +// end::top-list-result[] +; + +topListAllTypesAsc +required_capability: agg_top_list +FROM employees +| STATS + date = TOP_LIST(hire_date, 2, "asc"), + double = TOP_LIST(salary_change, 2, "asc"), + integer = TOP_LIST(salary, 2, "asc"), + long = TOP_LIST(salary_change.long, 2, "asc") +; + +date:date | double:double | integer:integer | long:long +[1985-02-18T00:00:00.000Z,1985-02-24T00:00:00.000Z] | [-9.81,-9.28] | [25324,25945] | [-9,-9] +; + +topListAllTypesDesc +required_capability: agg_top_list +FROM employees +| STATS + date = TOP_LIST(hire_date, 2, "desc"), + double = TOP_LIST(salary_change, 2, "desc"), + integer = TOP_LIST(salary, 2, "desc"), + long = TOP_LIST(salary_change.long, 2, "desc") +; + +date:date | double:double | integer:integer | long:long +[1999-04-30T00:00:00.000Z,1997-05-19T00:00:00.000Z] | [14.74,14.68] | [74999,74970] | [14,14] +; + +topListAllTypesRow +required_capability: agg_top_list +ROW + constant_date=TO_DATETIME("1985-02-18T00:00:00.000Z"), + constant_double=-9.81, + constant_integer=25324, + constant_long=TO_LONG(-9) +| STATS + date = TOP_LIST(constant_date, 2, "asc"), + double = TOP_LIST(constant_double, 2, "asc"), + integer = TOP_LIST(constant_integer, 2, "asc"), + long = TOP_LIST(constant_long, 2, "asc") +| keep date, double, integer, long +; + +date:date | double:double | integer:integer | long:long +1985-02-18T00:00:00.000Z | -9.81 | 25324 | -9 +; + +topListSomeBuckets +required_capability: agg_top_list +FROM employees +| STATS top_salary = TOP_LIST(salary, 2, "desc") by still_hired +| sort still_hired asc +; + +top_salary:integer | still_hired:boolean +[74999,74970] | 
false +[74572,73578] | true +; + +topListManyBuckets +required_capability: agg_top_list +FROM employees +| STATS top_salary = TOP_LIST(salary, 2, "desc") by x=emp_no, y=emp_no+1 +| sort x asc +| limit 3 +; + +top_salary:integer | x:integer | y:integer +57305 | 10001 | 10002 +56371 | 10002 | 10003 +61805 | 10003 | 10004 +; + +topListMultipleStats +required_capability: agg_top_list +FROM employees +| STATS top_salary = TOP_LIST(salary, 1, "desc") by emp_no +| STATS top_salary = TOP_LIST(top_salary, 3, "asc") +; + +top_salary:integer +[25324,25945,25976] +; + +topListAllTypesMin +required_capability: agg_top_list +FROM employees +| STATS + date = TOP_LIST(hire_date, 1, "asc"), + double = TOP_LIST(salary_change, 1, "asc"), + integer = TOP_LIST(salary, 1, "asc"), + long = TOP_LIST(salary_change.long, 1, "asc") +; + +date:date | double:double | integer:integer | long:long +1985-02-18T00:00:00.000Z | -9.81 | 25324 | -9 +; + +topListAllTypesMax +required_capability: agg_top_list +FROM employees +| STATS + date = TOP_LIST(hire_date, 1, "desc"), + double = TOP_LIST(salary_change, 1, "desc"), + integer = TOP_LIST(salary, 1, "desc"), + long = TOP_LIST(salary_change.long, 1, "desc") +; + +date:date | double:double | integer:integer | long:long +1999-04-30T00:00:00.000Z | 14.74 | 74999 | 14 +; + +topListAscDesc +required_capability: agg_top_list +FROM employees +| STATS top_asc = TOP_LIST(salary, 3, "asc"), top_desc = TOP_LIST(salary, 3, "desc") +; + +top_asc:integer | top_desc:integer +[25324, 25945, 25976] | [74999, 74970, 74572] +; + +topListEmpty +required_capability: agg_top_list +FROM employees +| WHERE salary < 0 +| STATS top = TOP_LIST(salary, 3, "asc") +; + +top:integer +null +; + +topListDuplicates +required_capability: agg_top_list +FROM employees +| STATS integer = TOP_LIST(languages, 2, "desc") +; + +integer:integer +[5, 5] +; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index 3eef9f7356b39..e65f574422dd5 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -42,6 +42,11 @@ public class EsqlCapabilities { */ private static final String FN_SUBSTRING_EMPTY_NULL = "fn_substring_empty_null"; + /** + * Support for aggregation function {@code TOP_LIST}. + */ + private static final String AGG_TOP_LIST = "agg_top_list"; + /** * Optimization for ST_CENTROID changed some results in cartesian data. 
#108713 */ @@ -84,6 +89,7 @@ private static Set capabilities() { caps.add(FN_CBRT); caps.add(FN_IP_PREFIX); caps.add(FN_SUBSTRING_EMPTY_NULL); + caps.add(AGG_TOP_LIST); caps.add(ST_CENTROID_AGG_OPTIMIZED); caps.add(METADATA_IGNORED_FIELD); caps.add(FN_MV_APPEND); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java index 8fd6ebe8d7d69..7034f23be1662 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java @@ -22,6 +22,7 @@ import org.elasticsearch.xpack.esql.expression.function.aggregate.Percentile; import org.elasticsearch.xpack.esql.expression.function.aggregate.SpatialCentroid; import org.elasticsearch.xpack.esql.expression.function.aggregate.Sum; +import org.elasticsearch.xpack.esql.expression.function.aggregate.TopList; import org.elasticsearch.xpack.esql.expression.function.aggregate.Values; import org.elasticsearch.xpack.esql.expression.function.grouping.Bucket; import org.elasticsearch.xpack.esql.expression.function.scalar.conditional.Case; @@ -192,6 +193,7 @@ private FunctionDefinition[][] functions() { def(Min.class, Min::new, "min"), def(Percentile.class, Percentile::new, "percentile"), def(Sum.class, Sum::new, "sum"), + def(TopList.class, TopList::new, "top_list"), def(Values.class, Values::new, "values") }, // math new FunctionDefinition[] { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java index 3f6632f66bcee..1c1139c197ac0 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java @@ -24,8 +24,12 @@ public class Max extends NumericAggregate implements SurrogateExpression { - @FunctionInfo(returnType = { "double", "integer", "long" }, description = "The maximum value of a numeric field.", isAggregation = true) - public Max(Source source, @Param(name = "number", type = { "double", "integer", "long" }) Expression field) { + @FunctionInfo( + returnType = { "double", "integer", "long", "date" }, + description = "The maximum value of a numeric field.", + isAggregation = true + ) + public Max(Source source, @Param(name = "number", type = { "double", "integer", "long", "date" }) Expression field) { super(source, field); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java index 16821752bc7b8..ecfc2200a3643 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java @@ -24,8 +24,12 @@ public class Min extends NumericAggregate implements SurrogateExpression { - @FunctionInfo(returnType = { "double", "integer", "long" }, description = "The minimum value of a numeric field.", isAggregation = true) - public Min(Source source, @Param(name = "number", type = { "double", "integer", "long" }) 
Expression field) { + @FunctionInfo( + returnType = { "double", "integer", "long", "date" }, + description = "The minimum value of a numeric field.", + isAggregation = true + ) + public Min(Source source, @Param(name = "number", type = { "double", "integer", "long", "date" }) Expression field) { super(source, field); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/NumericAggregate.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/NumericAggregate.java index b003b981c0709..390cd0d68018e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/NumericAggregate.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/NumericAggregate.java @@ -19,6 +19,28 @@ import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.DEFAULT; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isType; +/** + * Aggregate function that receives a numeric, signed field, and returns a single double value. + *

+ * <p>
+ *     Implement the supplier methods to return the correct {@link AggregatorFunctionSupplier}.
+ * </p>
+ * <p>
+ *     Some methods can be optionally overridden to support different variations:
+ * </p>
+ * <ul>
+ *     <li>
+ *         {@link #supportsDates}: override to also support dates. Defaults to false.
+ *     </li>
+ *     <li>
+ *         {@link #resolveType}: override to support different parameters.
+ *         Call {@code super.resolveType()} to add extra checks.
+ *     </li>
+ *     <li>
+ *         {@link #dataType}: override to return a different datatype.
+ *         You can return {@code field().dataType()} to propagate the parameter type.
+ *     </li>
+ * </ul>
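+ * <p>
+ *     A minimal sketch of a date-capable subclass (the names {@code MyMax} and
+ *     {@code MyMaxLongAggregatorFunctionSupplier} are illustrative placeholders,
+ *     and the remaining suppliers are elided):
+ * </p>
+ * <pre>{@code
+ * public class MyMax extends NumericAggregate {
+ *     public MyMax(Source source, Expression field) {
+ *         super(source, field);
+ *     }
+ *
+ *     @Override
+ *     protected boolean supportsDates() {
+ *         return true; // also accept datetime, which is routed to the long supplier
+ *     }
+ *
+ *     @Override
+ *     protected AggregatorFunctionSupplier longSupplier(List<Integer> inputChannels) {
+ *         return new MyMaxLongAggregatorFunctionSupplier(inputChannels); // illustrative
+ *     }
+ *
+ *     // intSupplier and doubleSupplier omitted from this sketch
+ * }
+ * }</pre>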
    + */ public abstract class NumericAggregate extends AggregateFunction implements ToAggregator { NumericAggregate(Source source, Expression field, List parameters) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/TopList.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/TopList.java new file mode 100644 index 0000000000000..79893b1c7de07 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/TopList.java @@ -0,0 +1,181 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.aggregate; + +import org.elasticsearch.common.lucene.BytesRefs; +import org.elasticsearch.compute.aggregation.AggregatorFunctionSupplier; +import org.elasticsearch.compute.aggregation.TopListDoubleAggregatorFunctionSupplier; +import org.elasticsearch.compute.aggregation.TopListIntAggregatorFunctionSupplier; +import org.elasticsearch.compute.aggregation.TopListLongAggregatorFunctionSupplier; +import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.NodeInfo; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.expression.SurrogateExpression; +import org.elasticsearch.xpack.esql.expression.function.Example; +import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.Param; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; +import org.elasticsearch.xpack.esql.planner.ToAggregator; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; + +import static org.elasticsearch.common.logging.LoggerMessageFormat.format; +import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.FIRST; +import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.SECOND; +import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.THIRD; +import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isFoldable; +import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isString; +import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isType; + +public class TopList extends AggregateFunction implements ToAggregator, SurrogateExpression { + private static final String ORDER_ASC = "ASC"; + private static final String ORDER_DESC = "DESC"; + + @FunctionInfo( + returnType = { "double", "integer", "long", "date" }, + description = "Collects the top values for a field. Includes repeated values.", + isAggregation = true, + examples = @Example(file = "stats_top_list", tag = "top-list") + ) + public TopList( + Source source, + @Param( + name = "field", + type = { "double", "integer", "long", "date" }, + description = "The field to collect the top values for." 
+ ) Expression field, + @Param(name = "limit", type = { "integer" }, description = "The maximum number of values to collect.") Expression limit, + @Param( + name = "order", + type = { "keyword" }, + description = "The order to calculate the top values. Either `asc` or `desc`." + ) Expression order + ) { + super(source, field, Arrays.asList(limit, order)); + } + + public static TopList readFrom(PlanStreamInput in) throws IOException { + return new TopList(Source.readFrom(in), in.readExpression(), in.readExpression(), in.readExpression()); + } + + public void writeTo(PlanStreamOutput out) throws IOException { + source().writeTo(out); + List fields = children(); + assert fields.size() == 3; + out.writeExpression(fields.get(0)); + out.writeExpression(fields.get(1)); + out.writeExpression(fields.get(2)); + } + + private Expression limitField() { + return parameters().get(0); + } + + private Expression orderField() { + return parameters().get(1); + } + + private int limitValue() { + return (int) limitField().fold(); + } + + private String orderRawValue() { + return BytesRefs.toString(orderField().fold()); + } + + private boolean orderValue() { + return orderRawValue().equalsIgnoreCase(ORDER_ASC); + } + + @Override + protected TypeResolution resolveType() { + if (childrenResolved() == false) { + return new TypeResolution("Unresolved children"); + } + + var typeResolution = isType( + field(), + dt -> dt == DataType.DATETIME || dt.isNumeric() && dt != DataType.UNSIGNED_LONG, + sourceText(), + FIRST, + "numeric except unsigned_long or counter types" + ).and(isFoldable(limitField(), sourceText(), SECOND)) + .and(isType(limitField(), dt -> dt == DataType.INTEGER, sourceText(), SECOND, "integer")) + .and(isFoldable(orderField(), sourceText(), THIRD)) + .and(isString(orderField(), sourceText(), THIRD)); + + if (typeResolution.unresolved()) { + return typeResolution; + } + + var limit = limitValue(); + var order = orderRawValue(); + + if (limit <= 0) { + return new TypeResolution(format(null, "Limit must be greater than 0 in [{}], found [{}]", sourceText(), limit)); + } + + if (order.equalsIgnoreCase(ORDER_ASC) == false && order.equalsIgnoreCase(ORDER_DESC) == false) { + return new TypeResolution( + format(null, "Invalid order value in [{}], expected [{}, {}] but got [{}]", sourceText(), ORDER_ASC, ORDER_DESC, order) + ); + } + + return TypeResolution.TYPE_RESOLVED; + } + + @Override + public DataType dataType() { + return field().dataType(); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, TopList::new, children().get(0), children().get(1), children().get(2)); + } + + @Override + public TopList replaceChildren(List newChildren) { + return new TopList(source(), newChildren.get(0), newChildren.get(1), newChildren.get(2)); + } + + @Override + public AggregatorFunctionSupplier supplier(List inputChannels) { + DataType type = field().dataType(); + if (type == DataType.LONG || type == DataType.DATETIME) { + return new TopListLongAggregatorFunctionSupplier(inputChannels, limitValue(), orderValue()); + } + if (type == DataType.INTEGER) { + return new TopListIntAggregatorFunctionSupplier(inputChannels, limitValue(), orderValue()); + } + if (type == DataType.DOUBLE) { + return new TopListDoubleAggregatorFunctionSupplier(inputChannels, limitValue(), orderValue()); + } + throw EsqlIllegalArgumentException.illegalDataType(type); + } + + @Override + public Expression surrogate() { + var s = source(); + + if (limitValue() == 1) { + if (orderValue()) { + return new Min(s, field()); + } 
else { + return new Max(s, field()); + } + } + + return null; + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/package-info.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/package-info.java new file mode 100644 index 0000000000000..a99c7a8b7ac8d --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/package-info.java @@ -0,0 +1,176 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +/** + * Functions that aggregate values, with or without grouping within buckets. + * Used in `STATS` and similar commands. + * + *

+ * <h2>Guide to adding new aggregate function</h2>
+ * <ol>
+ *     <li>
+ *         Aggregation functions are more complex than scalar functions, so it's a good idea to discuss
+ *         the new function with the ESQL team before starting to implement it.
+ *         <p>
+ *             You may also discuss its implementation, as aggregations may require special performance considerations.
+ *         </p>
+ *     </li>
+ *     <li>
+ *         To learn the basics about making functions, check {@link org.elasticsearch.xpack.esql.expression.function.scalar}.
+ *         <p>
+ *             It has the guide to making a simple function, which should be a good base to start doing aggregations.
+ *         </p>
+ *     </li>
+ *     <li>
+ *         Pick one of the csv-spec files in {@code x-pack/plugin/esql/qa/testFixtures/src/main/resources/}
+ *         and add a test for the function you want to write. These files are roughly themed but there
+ *         isn't a strong guiding principle in the organization.
+ *     </li>
+ *     <li>
+ *         Rerun the {@code CsvTests} and watch your new test fail.
+ *     </li>
+ *     <li>
+ *         Find an aggregate function in this package similar to the one you are working on and copy it to build yours.
+ *         Your function might extend from the available abstract classes. Check the javadoc of each before using them:
+ *         <ul>
+ *             <li>
+ *                 {@link org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction}: The base class for aggregates
+ *             </li>
+ *             <li>
+ *                 {@link org.elasticsearch.xpack.esql.expression.function.aggregate.NumericAggregate}: Aggregation for numeric values
+ *             </li>
+ *             <li>
+ *                 {@link org.elasticsearch.xpack.esql.expression.function.aggregate.SpatialAggregateFunction}:
+ *                 Aggregation for spatial values
+ *             </li>
+ *         </ul>
+ *     </li>
+ *     <li>
+ *         Fill the required methods in your new function. Check their JavaDoc for more information.
+ *         Here are some of the important ones:
+ *         <ul>
+ *             <li>
+ *                 Constructor: Review the constructor annotations, and make sure to add the correct types and descriptions.
+ *                 <ul>
+ *                     <li>{@link org.elasticsearch.xpack.esql.expression.function.FunctionInfo}, for the constructor itself</li>
+ *                     <li>{@link org.elasticsearch.xpack.esql.expression.function.Param}, for the function parameters</li>
+ *                 </ul>
+ *             </li>
+ *             <li>
+ *                 {@code resolveType}: Check the metadata of your function parameters.
+ *                 This may include types, whether they are foldable or not, or their possible values.
+ *             </li>
+ *             <li>
+ *                 {@code dataType}: This will return the datatype of your function.
+ *                 May be based on its current parameters.
+ *             </li>
+ *         </ul>
+ *
+ *         Finally, you may want to implement some interfaces.
+ *         Check their JavaDocs to see if they are suitable for your function:
+ *         <ul>
+ *             <li>
+ *                 {@link org.elasticsearch.xpack.esql.planner.ToAggregator}: (More information about aggregators below)
+ *             </li>
+ *             <li>
+ *                 {@link org.elasticsearch.xpack.esql.expression.SurrogateExpression}
+ *             </li>
+ *         </ul>
+ *     </li>
+ *     <li>
+ *         To introduce your aggregation to the engine:
+ *         <ul>
+ *             <li>
+ *                 Add it to {@code org.elasticsearch.xpack.esql.planner.AggregateMapper}.
+ *                 Check all usages of other aggregations there, and replicate the logic.
+ *             </li>
+ *             <li>
+ *                 Add it to {@link org.elasticsearch.xpack.esql.io.stream.PlanNamedTypes}.
+ *                 Consider adding a {@code writeTo} method and a constructor/{@code readFrom} method inside your function,
+ *                 to keep all the logic in one place.
+ *                 <p>
+ *                     You can find examples of other aggregations using this method,
+ *                     like {@link org.elasticsearch.xpack.esql.expression.function.aggregate.TopList#writeTo(PlanStreamOutput)}
+ *                 </p>
+ *             </li>
+ *             <li>
+ *                 Do the same with {@link org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry}.
+ *             </li>
+ *         </ul>
+ *     </li>
+ * </ol>
+ *
+ * <h2>Creating aggregators for your function</h2>
+ * <p>
+ *     Aggregators contain the core logic of your aggregation. That is, how to combine values, what to store, how to process data, etc.
+ * </p>
+ * <ol>
+ *     <li>
+ *         Copy an existing aggregator to use as a base. You'll usually make one per type. Check other classes to see the naming pattern.
+ *         You can find them in {@link org.elasticsearch.compute.aggregation}.
+ *         <p>
+ *             Note that some aggregators are autogenerated, so they live in different directories.
+ *             The base is {@code x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/}
+ *         </p>
+ *     </li>
+ *     <li>
+ *         Make a test for your aggregator.
+ *         You can copy an existing one from {@code x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/}.
+ *         <p>
+ *             Tests extending from {@code org.elasticsearch.compute.aggregation.AggregatorFunctionTestCase}
+ *             will already include most required cases. You should only need to fill the required abstract methods.
+ *         </p>
+ *     </li>
+ *     <li>
+ *         Check the Javadoc of the {@link org.elasticsearch.compute.ann.Aggregator}
+ *         and {@link org.elasticsearch.compute.ann.GroupingAggregator} annotations.
+ *         Add/Modify them on your aggregator.
+ *     </li>
+ *     <li>
+ *         The {@link org.elasticsearch.compute.ann.Aggregator} JavaDoc explains the static methods you should add.
+ *     </li>
+ *     <li>
+ *         After implementing the required methods (even if they have a dummy implementation),
+ *         run the CsvTests to generate some extra required classes.
+ *         <p>
+ *             One of them will be the {@code AggregatorFunctionSupplier} for your aggregator.
+ *             Find it by its name ({@code AggregatorFunctionSupplier}),
+ *             and return it in the {@code toSupplier} method in your function, under the correct type condition.
+ *         </p>
+ *     </li>
+ *     <li>
+ *         Now, complete the implementation of the aggregator, until the tests pass!
+ *     </li>
+ * </ol>
+ *
+ * <h2>StringTemplates</h2>
+ * <p>
+ *     Making an aggregator per type may be repetitive. To avoid code duplication, we use StringTemplates:
+ * </p>
+ * <ol>
+ *     <li>
+ *         Create a new StringTemplate file.
+ *         Use another as a reference, like
+ *         {@code x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-TopListAggregator.java.st}.
+ *     </li>
+ *     <li>
+ *         Add the template scripts to {@code x-pack/plugin/esql/compute/build.gradle}.
+ *         <p>
+ *             You can also see there which variables you can use, and which types are currently supported.
+ *         </p>
+ *     </li>
+ *     <li>
+ *         After completing your template, run the generation with {@code ./gradlew :x-pack:plugin:esql:compute:compileJava}.
+ *         <p>
+ *             You may need to tweak some import orders per type so they don't raise warnings.
+ *         </p>
+ *     </li>
+ * </ol>
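+ *
+ * <p>
+ *     As a minimal sketch of the shape such an aggregator takes (modelled on the
+ *     existing max aggregators; the class name here is illustrative):
+ * </p>
+ * <pre>{@code
+ * @Aggregator({ @IntermediateState(name = "max", type = "LONG"), @IntermediateState(name = "seen", type = "BOOLEAN") })
+ * @GroupingAggregator
+ * class MyMaxLongAggregator {
+ *     public static long init() {
+ *         return Long.MIN_VALUE;
+ *     }
+ *
+ *     public static long combine(long current, long v) {
+ *         return Math.max(current, v);
+ *     }
+ * }
+ * }</pre>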
    + */ +package org.elasticsearch.xpack.esql.expression.function.aggregate; + +import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java index be5e105c3398e..831d105a89076 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java @@ -58,6 +58,7 @@ import org.elasticsearch.xpack.esql.expression.function.aggregate.Percentile; import org.elasticsearch.xpack.esql.expression.function.aggregate.SpatialCentroid; import org.elasticsearch.xpack.esql.expression.function.aggregate.Sum; +import org.elasticsearch.xpack.esql.expression.function.aggregate.TopList; import org.elasticsearch.xpack.esql.expression.function.aggregate.Values; import org.elasticsearch.xpack.esql.expression.function.grouping.Bucket; import org.elasticsearch.xpack.esql.expression.function.grouping.GroupingFunction; @@ -298,6 +299,7 @@ public static List namedTypeEntries() { of(AggregateFunction.class, Percentile.class, PlanNamedTypes::writePercentile, PlanNamedTypes::readPercentile), of(AggregateFunction.class, SpatialCentroid.class, PlanNamedTypes::writeAggFunction, PlanNamedTypes::readAggFunction), of(AggregateFunction.class, Sum.class, PlanNamedTypes::writeAggFunction, PlanNamedTypes::readAggFunction), + of(AggregateFunction.class, TopList.class, (out, prefix) -> prefix.writeTo(out), TopList::readFrom), of(AggregateFunction.class, Values.class, PlanNamedTypes::writeAggFunction, PlanNamedTypes::readAggFunction) ); List entries = new ArrayList<>(declared); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/package-info.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/package-info.java index 863476ba55686..0d45ce10b1966 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/package-info.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/package-info.java @@ -29,6 +29,7 @@ * functions, designed to run over a {@link org.elasticsearch.compute.data.Block}
 * </li>
 * <li>{@link org.elasticsearch.xpack.esql.session.EsqlSession} - manages state across a query</li>
 * <li>{@link org.elasticsearch.xpack.esql.expression.function.scalar} - Guide to writing scalar functions</li>
+ * <li>{@link org.elasticsearch.xpack.esql.expression.function.aggregate} - Guide to writing aggregation functions</li>
 * <li>{@link org.elasticsearch.xpack.esql.analysis.Analyzer} - The first step in query processing</li>
 * <li>{@link org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizer} - Coordinator level logical optimizations</li>
 * <li>{@link org.elasticsearch.xpack.esql.optimizer.LocalLogicalPlanOptimizer} - Data node level logical optimizations</li>
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java
index 68e6ea4d6cadb..83fdd5dc0c5d2 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java
@@ -32,6 +32,7 @@
 import org.elasticsearch.xpack.esql.expression.function.aggregate.SpatialAggregateFunction;
 import org.elasticsearch.xpack.esql.expression.function.aggregate.SpatialCentroid;
 import org.elasticsearch.xpack.esql.expression.function.aggregate.Sum;
+import org.elasticsearch.xpack.esql.expression.function.aggregate.TopList;
 import org.elasticsearch.xpack.esql.expression.function.aggregate.Values;
 
 import java.lang.invoke.MethodHandle;
@@ -61,7 +62,8 @@ final class AggregateMapper {
         Percentile.class,
         SpatialCentroid.class,
         Sum.class,
-        Values.class
+        Values.class,
+        TopList.class
     );
 
     /** Record of agg Class, type, and grouping (or non-grouping). */
@@ -143,6 +145,8 @@ private static Stream<Tuple<Class<?>, Tuple<String, String>>> typeAndNames(Class<?> clazz) {
         } else if (Values.class.isAssignableFrom(clazz)) {
             // TODO can't we figure this out from the function itself?
             types = List.of("Int", "Long", "Double", "Boolean", "BytesRef");
+        } else if (TopList.class.isAssignableFrom(clazz)) {
+            types = List.of("Int", "Long", "Double");
         } else {
             assert clazz == CountDistinct.class : "Expected CountDistinct, got: " + clazz;
             types = Stream.concat(NUMERIC.stream(), Stream.of("Boolean", "BytesRef")).toList();

From 379c02bae5df6641024d489b3d13c8ff6583462e Mon Sep 17 00:00:00 2001
From: David Turner
Date: Wed, 19 Jun 2024 16:06:27 +0100
Subject: [PATCH 36/44] AwaitsFix for #109932

---
 .../compute/aggregation/ValuesIntAggregatorFunctionTests.java | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesIntAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesIntAggregatorFunctionTests.java
index 9d421c7801a43..46e31b589997a 100644
--- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesIntAggregatorFunctionTests.java
+++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesIntAggregatorFunctionTests.java
@@ -7,6 +7,7 @@
 
 package org.elasticsearch.compute.aggregation;
 
+import org.apache.lucene.tests.util.LuceneTestCase;
 import org.elasticsearch.compute.data.Block;
 import org.elasticsearch.compute.data.BlockFactory;
 import org.elasticsearch.compute.data.BlockUtils;
@@ -19,6 +20,7 @@
 
 import static org.hamcrest.Matchers.containsInAnyOrder;
 
+@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/109932")
 public class ValuesIntAggregatorFunctionTests extends AggregatorFunctionTestCase {
     @Override
     protected SourceOperator simpleInput(BlockFactory blockFactory, int size) {

From d1e3c0afc419a73bdb7b6305e0663f81e80cec2e Mon Sep 17 00:00:00 2001
From: Craig Taverner
Date: Wed, 19 Jun 2024 18:01:39 +0200
Subject: [PATCH 37/44] ESQL: Union Types Support (#107545)

* Union Types Support

The second prototype replaced MultiTypeField.Unresolved with MultiTypeField,
but this clashed with existing behaviour around mapping unused MultiTypeFields
to `unsupported` and `null`, so this new attempt simply adds new fields,
resulting in more than one field with the same name.
We still need to store this new field in EsRelation, so that the physical
planner can insert it into FieldExtractExec; this is quite similar to the
second prototype.

The following query works in this third prototype:

```
multiIndexIpString
FROM sample_data* METADATA _index
| EVAL client_ip = TO_IP(client_ip)
| KEEP _index, @timestamp, client_ip, event_duration, message
| SORT _index ASC, @timestamp DESC
```

As with the previous prototype, we no longer need an aggregation to force the
conversion function onto the data node, as the 'real' conversion is now done at
field extraction time using the converter function previously saved in the
EsRelation and replanned into the EsQueryExec.

Support row-stride-reader for LoadFromMany

Add missing ESQL version after rebase on main

Fixed missing block release

Simplify UnresolvedUnionTypes

Support other commands, notably WHERE

Update docs/changelog/107545.yaml

Fix changelog

Removed unused code

Slight code reduction in analyser of union types

Removed unused interface method

Fix bug in copying blocks (array overrun)

Convert MultiTypeEsField.UnresolvedField back to InvalidMappedField

This is to ensure older behaviour still works.

Simplify InvalidMappedField support

Rather than complex code to recreate InvalidMappedField from
MultiTypeEsField.UnresolvedField, we rely on the fact that this is the parent
class anyway, so we can resolve this during plan serialization/deserialization
anyway. Much simpler.

Simplify InvalidMappedField support further

Combining InvalidMappedField and MultiTypeEsField.UnresolvedField into one
class simplifies plan serialization even further.

InvalidMappedField is used slightly differently in QL

We need to separate the aggregatable used in the original really-invalid
mapped field from the aggregatable used if the field can indeed be used as a
union-type in ES|QL.

Updated version limitation after 8.14 branch

Try debug CI failures in multi-node clusters

Support type conversion in rowstride reader on single leaf

Disable union_types from CsvTests

Keep track of per-shard converters for LoadFromMany

Simplify block loader convert function (see the sketch below)

Code cleanup

Added unit test for ValuesSourceReaderOperator including field type conversions
at block loading

Added test for @timestamp and fixed related bug

It turns out that most, but not all, DataType values have the same esType as
typeName, and @timestamp is one that does not, using `date` for esType and
`datetime` for typeName. Our EsqlIndexResolver was recording multi-type fields
with `esType`, while later the actual type conversion was using an evaluator
that relied on DataTypes.typeFromName(typeName). So we fixed the
EsqlIndexResolver to rather use typeName.

Added more tests, with three indices combined and two type conversions

Disable lucene-pushdown on union-type fields

Since the union-type rewriter replaced conversion functions with new
FieldAttributes, these were passing the check for being possible to push-down,
which was incorrect. Now we prevent that.

Set union-type aggregatable flag to false always

This simplifies the push-down check.
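For illustration, the block-loader conversion hook mentioned above can be
sketched with simplified stand-in types; none of the types below are the real
Elasticsearch interfaces, which only gain a default no-op `convert(Block)`
method (shown in the BlockLoader diff further down):

```
// Simplified sketch only: Block, TypeConverter and BlockLoader here are
// stand-ins, not the real Elasticsearch types.
interface Block {}

interface TypeConverter {
    Block convert(Block block); // e.g. a keyword -> ip conversion evaluator
}

interface BlockLoader {
    Block load();

    default Block convert(Block block) {
        return block; // default: no conversion
    }
}

// A loader that applies its per-shard converter to every block it loads.
record ConvertingBlockLoader(BlockLoader delegate, TypeConverter converter) implements BlockLoader {
    @Override
    public Block load() {
        return convert(delegate.load());
    }

    @Override
    public Block convert(Block block) {
        return converter.convert(block);
    }
}
```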
Fixed tests after rebase on main

Add unit tests for union-types (same field, different type)

Remove generic warnings

Test code cleanup and clarifying comments

Remove -IT_tests_only in favor of CsvTests assumeFalse

Improved comment

Code review updates

Code review updates

Remove changes to ql/EsRelation

And it turned out the latest version of union type no longer needed these
changes anyway, and was using the new EsRelation in the ESQL module without
these changes.

Port InvalidMappedField to ESQL

Note, this extends the QL version of InvalidMappedField, so is not a complete
port. This is necessary because of the intertwining of QL IndexResolver and
EsqlIndexResolver. Once those classes are disentangled, we can completely
break InvalidMappedField from QL and make it a forbidden type.

Fix capabilities line after rebase on main

Revert QL FieldAttribute and extend with ESQL FieldAttribute

So as to remove any edits to QL code, we extend FieldAttribute in the ESQL
code with the changes required, since it is simply to include the `field` in
the hashCode and equals methods.

Revert "Revert QL FieldAttribute and extend with ESQL FieldAttribute"

This reverts commit 168c6c75436e26b83e083cd3de8e18062e116bc9.

Switch UNION_TYPES from EsqlFeatures to EsqlCapabilities

Make hashCode and equals aligned

And removed unused method from earlier union-types work where we kept the
NodeId during re-writing (which we no longer do).

Replace required_feature with required_capability after rebase

Switch union_types capability back to feature, because capabilities do not
work in mixed clusters

Revert "Switch union_types capability back to feature, because capabilities do
not work in mixed clusters"

This reverts commit 56d58bedf756dbad703c07bf4cdb991d4341c1ae.

Added test for multiple columns from same fields

Both IP and Date are tested.

Fix bug with incorrectly resolving invalid types

And added more tests.

Fixed bug with multiple fields of same name

This fix simply removes the original field already at the EsRelation level,
which covers all test cases but has the side effect of having the final field
no longer be unsupported/null when the alias does not overwrite the field with
the same name. This is not exactly the correct semantic intent. The original
field name should be unsupported/null unless the user explicitly overwrote the
name with `field=TO_TYPE(field)`, which effectively deletes the old field
anyway.

Fixed bug with multiple conversions of the same field

This also fixes the issue with the previous fix that incorrectly reported the
converted type for the original field.

More tests with multiple fields and KEEP/DROP combinations

Replace skip with capabilities in YML tests

Fixed missing ql->esql import change after merging main

Merged two InvalidMappedField classes

After the QL code was ported to esql.core, we can now make the edits directly
in InvalidMappedField instead of having one extend the other.

Move FieldAttribute edits from QL to ESQL

ESQL: Prepare analyzer for LOOKUP (#109045)

This extracts two fairly uncontroversial changes that were in the main LOOKUP
PR into a smaller change that's easier to review.

ESQL: Move serialization for EsField (#109222)

This moves the serialization logic for `EsField` into the `EsField` subclasses
to better align with the way the rest of Elasticsearch works. It also switches
them from ESQL's home-grown `writeNamed` thing to `NamedWriteable`. These are
wire compatible with one another (a simplified sketch of the pattern follows
below).
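The per-class serialization pattern described above can be sketched as
follows, using plain `java.io` streams as stand-ins for the Elasticsearch
`StreamInput`/`StreamOutput` abstractions; the field layout and entry name are
assumptions for illustration:

```
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

// Sketch of the NamedWriteable-style pattern: the class owns its serialization
// and exposes a read-side factory registered under a stable entry name.
record KeywordEsFieldSketch(String name, boolean aggregatable) {
    static final String ENTRY_NAME = "KeywordEsField"; // registry key (assumed)

    static KeywordEsFieldSketch readFrom(DataInput in) throws IOException {
        return new KeywordEsFieldSketch(in.readUTF(), in.readBoolean());
    }

    void writeTo(DataOutput out) throws IOException {
        out.writeUTF(name);
        out.writeBoolean(aggregatable);
    }
}
```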
ESQL: Move serialization of `Attribute` (#109267)

This moves the serialization of `Attribute` classes used in ESQL into the
classes themselves to better line up with the rest of Elasticsearch.

ES|QL: add MV_APPEND function (#107001)

Adding the `MV_APPEND(value1, value2)` function, which appends two values to
create a single multi-value. If one or both of the inputs are multi-values,
the result is the concatenation of all the values, e.g.

```
MV_APPEND([a, b], [c, d]) -> [a, b, c, d]
```

~I think for this specific case it makes sense to consider `null` values as
empty arrays, so that~
~MV_APPEND(value, null) -> value~
~It is pretty uncommon for ESQL (all the other functions, apart from
`COALESCE`, short-circuit to `null` when one of the values is null), so let's
discuss this behavior.~

[EDIT] considering the feedback from Andrei, I changed this logic and made it
consistent with the other functions: now if one of the parameters is null, the
function returns null (a short sketch of these semantics follows below).

[ES|QL] Convert string to datetime when the other side of an arithmetic
operator is date_period or time_duration (#108455)

* convert string to datetime when the other side of binary operator is
temporal amount

ESQL: Move `NamedExpression` serialization (#109380)

This moves the serialization for the remaining `NamedExpression` subclass into
the class itself, and switches all direct serialization of `NamedExpression`s
to `readNamedWriteable` and friends. All other `NamedExpression` subclasses
extend from `Attribute`, whose serialization was moved earlier. They are
already registered under the "category class" for `Attribute`. This also
registers them as `NamedExpression`s.

ESQL: Implement LOOKUP, an "inline" enrich (#107987)

This adds support for `LOOKUP`, a command that implements a sort of inline
`ENRICH`, using data that is passed in the request:

```
$ curl -uelastic:password -HContent-Type:application/json -XPOST \
    'localhost:9200/_query?error_trace&pretty&format=txt' \
    -d'{
        "query": "ROW a=1::LONG | LOOKUP t ON a",
        "tables": {
            "t": {
                "a:long":     [    1,     4,     2],
                "v1:integer": [   10,    11,    12],
                "v2:keyword": ["cat", "dog", "wow"]
            }
        },
        "version": "2024.04.01"
    }'
      v1       |      v2       |       a
---------------+---------------+---------------
10             |cat            |1
```

This required these PRs:
* #107624
* #107634
* #107701
* #107762

* Closes #107306

parent 32ac5ba755dd5c24364a210f1097ae093fdcbd75
author Craig Taverner 1717779549 +0200
committer Craig Taverner 1718115775 +0200

Fixed compile error after merging in main

Fixed strange merge issues from main

Remove version from ES|QL test queries after merging main

Fixed union-types on nested fields

Switch to Luigi's solution, and expand nested tests

Cleanup after rebase

* Added more tests from code review

Note that one test, `multiIndexIpStringStatsInline`, is muted due to failing
with the error:

    UnresolvedException: Invalid call to dataType on an unresolved object ?client_ip

* Make CsvTests consistent with integration tests for capabilities

The integration tests do not fail the tests if the capability does not even
exist on cluster nodes; instead the tests are ignored. The same behaviour
should happen with CsvTests for consistency.

* Return assumeThat to assertThat, but change order

This way we don't have to add more features to the test framework in this PR,
but we would probably want a mute feature (like a `skip` line).
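A minimal sketch of the MV_APPEND null semantics settled on above, with plain
Java lists standing in for ESQL multi-values (illustrative only, not the
engine implementation):

```
import java.util.ArrayList;
import java.util.List;

final class MvAppendSketch {
    // Concatenate two multi-values; if either input is null the result is null,
    // matching the behavior chosen for MV_APPEND after the review discussion.
    static <T> List<T> mvAppend(List<T> left, List<T> right) {
        if (left == null || right == null) {
            return null;
        }
        List<T> result = new ArrayList<>(left.size() + right.size());
        result.addAll(left);
        result.addAll(right);
        return result;
    }
}
```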
* Move serialization of MultiTypeEsField to NamedWriteable approach

Since the sub-fields are AbstractConvertFunction expressions, and Expression
is not yet fully supported as a category class for NamedWriteable, we need a
few slight tweaks to this, notably registering this explicitly in the
EsqlPlugin, as well as calling PlanStreamInput.readExpression() instead of
StreamInput.readNamedWriteable(Expression.class). These can be removed later
once Expression is fully supported as a category class.

* Remove attempt to mute two failed tests

We used required_capability to mute the tests, but this caused issues with
CsvTests, which also uses this as a spelling mistake checker for typing the
capability name wrong, so we tried to use muted-tests.yml, but that only mutes
tests in specific run configurations (i.e. we need to mute each and every IT
class separately). So now we just remove the tests entirely. We left a comment
in the muted-tests.yml file for future reference about how to mute csv-spec
tests.

* Fix rather massive issue with performance of testConcurrentSerialization

Recreating the config on every test was very expensive.

* Code review by Nik

---------

Co-authored-by: Elastic Machine
---
 docs/changelog/107545.yaml                    |    6 +
 muted-tests.yml                               |    7 +
 .../index/mapper/BlockLoader.java             |   10 +
 .../esql/core/expression/FieldAttribute.java  |    6 +-
 .../esql/core/type/InvalidMappedField.java    |   45 +-
 .../org/elasticsearch/compute/data/Page.java  |    4 +-
 .../lucene/ValuesSourceReaderOperator.java    |   89 +-
 .../ValueSourceReaderTypeConversionTests.java | 2020 +++++++++++++++++
 .../elasticsearch/xpack/esql/CsvAssert.java   |   18 +-
 .../xpack/esql/CsvTestsDataLoader.java        |   12 +
 .../resources/mapping-sample_data_str.json    |   16 +
 .../mapping-sample_data_ts_long.json          |   16 +
 .../src/main/resources/sample_data_str.csv    |    8 +
 .../main/resources/sample_data_ts_long.csv    |    8 +
 .../src/main/resources/union_types.csv-spec   |  719 ++++++
 .../xpack/esql/action/EsqlCapabilities.java   |    6 +
 .../xpack/esql/analysis/Analyzer.java         |  172 +-
 .../convert/AbstractConvertFunction.java      |    6 +-
 .../xpack/esql/plan/logical/EsRelation.java   |    2 +
 .../planner/EsPhysicalOperationProviders.java |  131 +-
 .../xpack/esql/plugin/EsqlPlugin.java         |    2 +
 .../xpack/esql/session/IndexResolver.java     |   20 +-
 .../xpack/esql/type/MultiTypeEsField.java     |  116 +
 .../elasticsearch/xpack/esql/CsvTests.java    |   36 +-
 .../esql/type/MultiTypeEsFieldTests.java      |  188 ++
 .../test/esql/160_union_types.yml             |  573 +++++
 .../test/esql/161_union_types_subfields.yml   |  203 ++
 27 files changed, 4362 insertions(+), 77 deletions(-)
 create mode 100644 docs/changelog/107545.yaml
 create mode 100644 x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValueSourceReaderTypeConversionTests.java
 create mode 100644 x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-sample_data_str.json
 create mode 100644 x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-sample_data_ts_long.json
 create mode 100644 x-pack/plugin/esql/qa/testFixtures/src/main/resources/sample_data_str.csv
 create mode 100644 x-pack/plugin/esql/qa/testFixtures/src/main/resources/sample_data_ts_long.csv
 create mode 100644 x-pack/plugin/esql/qa/testFixtures/src/main/resources/union_types.csv-spec
 create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/MultiTypeEsField.java
 create mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/MultiTypeEsFieldTests.java
 create mode 100644
x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/160_union_types.yml create mode 100644 x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/161_union_types_subfields.yml diff --git a/docs/changelog/107545.yaml b/docs/changelog/107545.yaml new file mode 100644 index 0000000000000..ad457cc5a533f --- /dev/null +++ b/docs/changelog/107545.yaml @@ -0,0 +1,6 @@ +pr: 107545 +summary: "ESQL: Union Types Support" +area: ES|QL +type: enhancement +issues: + - 100603 diff --git a/muted-tests.yml b/muted-tests.yml index ef3c8188498a9..aef4b526e8b52 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -103,3 +103,10 @@ tests: # - class: org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToIPTests # method: testCrankyEvaluateBlockWithoutNulls # issue: https://github.com/elastic/elasticsearch/... +# +# Mute a single test in an ES|QL csv-spec test file: +# - class: "org.elasticsearch.xpack.esql.CsvTests" +# method: "test {union_types.MultiIndexIpStringStatsInline}" +# issue: "https://github.com/elastic/elasticsearch/..." +# Note that this mutes for the unit-test-like CsvTests only. +# Muting for the integration tests needs to be done for each IT class individually. diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BlockLoader.java b/server/src/main/java/org/elasticsearch/index/mapper/BlockLoader.java index a91f005d6d5ab..42feda3e9dd48 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BlockLoader.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BlockLoader.java @@ -92,6 +92,16 @@ interface StoredFields { */ SortedSetDocValues ordinals(LeafReaderContext context) throws IOException; + /** + * In support of 'Union Types', we sometimes desire that Blocks loaded from source are immediately + * converted in some way. Typically, this would be a type conversion, or an encoding conversion. + * @param block original block loaded from source + * @return converted block (or original if no conversion required) + */ + default Block convert(Block block) { + return block; + } + /** * Load blocks with only null. 
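+ * This is used, for example, for fields that are absent from every document,
+ * such as the "missing_*" fields exercised by the tests later in this patch.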
*/ diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/FieldAttribute.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/FieldAttribute.java index a6e713007a97f..0f7d92564c8ab 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/FieldAttribute.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/FieldAttribute.java @@ -168,12 +168,14 @@ protected Attribute clone( @Override public int hashCode() { - return Objects.hash(super.hashCode(), path); + return Objects.hash(super.hashCode(), path, field); } @Override public boolean equals(Object obj) { - return super.equals(obj) && Objects.equals(path, ((FieldAttribute) obj).path); + return super.equals(obj) + && Objects.equals(path, ((FieldAttribute) obj).path) + && Objects.equals(field, ((FieldAttribute) obj).field); } @Override diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/InvalidMappedField.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/InvalidMappedField.java index fd7bfbec4730f..9b088cfb19f6c 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/InvalidMappedField.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/InvalidMappedField.java @@ -15,11 +15,15 @@ import java.io.IOException; import java.util.Map; import java.util.Objects; +import java.util.Set; import java.util.TreeMap; /** * Representation of field mapped differently across indices. * Used during mapping discovery only. + * Note that the field typesToIndices is not serialized because that information is + * not required through the cluster, only surviving as long as the Analyser phase of query planning. + * It is used specifically for the 'union types' feature in ES|QL. */ public class InvalidMappedField extends EsField { static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( @@ -29,10 +33,10 @@ public class InvalidMappedField extends EsField { ); private final String errorMessage; + private final Map> typesToIndices; public InvalidMappedField(String name, String errorMessage, Map properties) { - super(name, DataType.UNSUPPORTED, properties, false); - this.errorMessage = errorMessage; + this(name, errorMessage, properties, Map.of()); } public InvalidMappedField(String name, String errorMessage) { @@ -43,6 +47,19 @@ public InvalidMappedField(String name) { this(name, StringUtils.EMPTY, new TreeMap<>()); } + /** + * Constructor supporting union types, used in ES|QL. 
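+ * <p>A minimal illustration (index and field names below are hypothetical),
+ * showing the error message that makeErrorMessage derives from the map:
+ * <pre>{@code
+ * Map<String, Set<String>> typesToIndices = new TreeMap<>();
+ * typesToIndices.put("ip", Set.of("index1"));
+ * typesToIndices.put("keyword", Set.of("index2"));
+ * new InvalidMappedField("client_ip", typesToIndices);
+ * // error message: "mapped as [2] incompatible types: [ip] in [index1], [keyword] in [index2]"
+ * }</pre>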
+ */ + public InvalidMappedField(String name, Map> typesToIndices) { + this(name, makeErrorMessage(typesToIndices), new TreeMap<>(), typesToIndices); + } + + private InvalidMappedField(String name, String errorMessage, Map properties, Map> typesToIndices) { + super(name, DataType.UNSUPPORTED, properties, false); + this.errorMessage = errorMessage; + this.typesToIndices = typesToIndices; + } + private InvalidMappedField(StreamInput in) throws IOException { this(in.readString(), in.readString(), in.readImmutableMap(StreamInput::readString, i -> i.readNamedWriteable(EsField.class))); } @@ -88,4 +105,28 @@ public EsField getExactField() { public Exact getExactInfo() { return new Exact(false, "Field [" + getName() + "] is invalid, cannot access it"); } + + public Map> getTypesToIndices() { + return typesToIndices; + } + + private static String makeErrorMessage(Map> typesToIndices) { + StringBuilder errorMessage = new StringBuilder(); + errorMessage.append("mapped as ["); + errorMessage.append(typesToIndices.size()); + errorMessage.append("] incompatible types: "); + boolean first = true; + for (Map.Entry> e : typesToIndices.entrySet()) { + if (first) { + first = false; + } else { + errorMessage.append(", "); + } + errorMessage.append("["); + errorMessage.append(e.getKey()); + errorMessage.append("] in "); + errorMessage.append(e.getValue()); + } + return errorMessage.toString(); + } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Page.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Page.java index 4d41ab27312c3..2e46735bd5bd1 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Page.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Page.java @@ -83,7 +83,9 @@ private Page(boolean copyBlocks, int positionCount, Block[] blocks) { private Page(Page prev, Block[] toAdd) { for (Block block : toAdd) { if (prev.positionCount != block.getPositionCount()) { - throw new IllegalArgumentException("Block [" + block + "] does not have same position count"); + throw new IllegalArgumentException( + "Block [" + block + "] does not have same position count: " + block.getPositionCount() + " != " + prev.positionCount + ); } } this.positionCount = prev.positionCount; diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperator.java index 06b1375ac057e..ee747d98c26f8 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperator.java @@ -165,6 +165,7 @@ public int get(int i) { } } success = true; + return page.appendBlocks(blocks); } catch (IOException e) { throw new UncheckedIOException(e); } finally { @@ -172,7 +173,6 @@ public int get(int i) { Releasables.closeExpectNoException(blocks); } } - return page.appendBlocks(blocks); } private void positionFieldWork(int shard, int segment, int firstDoc) { @@ -233,6 +233,7 @@ private void loadFromSingleLeaf(Block[] blocks, int shard, int segment, BlockLoa new RowStrideReaderWork( field.rowStride(ctx), (Block.Builder) field.loader.builder(loaderBlockFactory, docs.count()), + field.loader, f ) ); @@ -262,17 +263,13 @@ private void loadFromSingleLeaf(Block[] blocks, int shard, int segment, BlockLoa ); for (int p = 0; p < 
docs.count(); p++) { int doc = docs.get(p); - if (storedFields != null) { - storedFields.advanceTo(doc); - } - for (int r = 0; r < rowStrideReaders.size(); r++) { - RowStrideReaderWork work = rowStrideReaders.get(r); - work.reader.read(doc, storedFields, work.builder); + storedFields.advanceTo(doc); + for (RowStrideReaderWork work : rowStrideReaders) { + work.read(doc, storedFields); } } - for (int r = 0; r < rowStrideReaders.size(); r++) { - RowStrideReaderWork work = rowStrideReaders.get(r); - blocks[work.offset] = work.builder.build(); + for (RowStrideReaderWork work : rowStrideReaders) { + blocks[work.offset] = work.build(); } } finally { Releasables.close(rowStrideReaders); @@ -310,7 +307,9 @@ private class LoadFromMany implements Releasable { private final IntVector docs; private final int[] forwards; private final int[] backwards; - private final Block.Builder[] builders; + private final Block.Builder[][] builders; + private final BlockLoader[][] converters; + private final Block.Builder[] fieldTypeBuilders; private final BlockLoader.RowStrideReader[] rowStride; BlockLoaderStoredFieldsFromLeafLoader storedFields; @@ -322,21 +321,25 @@ private class LoadFromMany implements Releasable { docs = docVector.docs(); forwards = docVector.shardSegmentDocMapForwards(); backwards = docVector.shardSegmentDocMapBackwards(); - builders = new Block.Builder[target.length]; + fieldTypeBuilders = new Block.Builder[target.length]; + builders = new Block.Builder[target.length][shardContexts.size()]; + converters = new BlockLoader[target.length][shardContexts.size()]; rowStride = new BlockLoader.RowStrideReader[target.length]; } void run() throws IOException { for (int f = 0; f < fields.length; f++) { /* - * Important note: each block loader has a method to build an - * optimized block loader, but we have *many* fields and some - * of those block loaders may not be compatible with each other. - * So! We take the least common denominator which is the loader - * from the element expected element type. + * Important note: each field has a desired type, which might not match the mapped type (in the case of union-types). + * We create the final block builders using the desired type, one for each field, but then also use inner builders + * (one for each field and shard), and converters (again one for each field and shard) to actually perform the field + * loading in a way that is correct for the mapped field type, and then convert between that type and the desired type. 
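+ * In short, per (field, shard) pair the data flows: mapped-type builder,
+ * then BlockLoader.convert(Block), then a copy into the desired-type builder.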
*/ - builders[f] = fields[f].info.type.newBlockBuilder(docs.getPositionCount(), blockFactory); + fieldTypeBuilders[f] = fields[f].info.type.newBlockBuilder(docs.getPositionCount(), blockFactory); + builders[f] = new Block.Builder[shardContexts.size()]; + converters[f] = new BlockLoader[shardContexts.size()]; } + ComputeBlockLoaderFactory loaderBlockFactory = new ComputeBlockLoaderFactory(blockFactory, docs.getPositionCount()); int p = forwards[0]; int shard = shards.getInt(p); int segment = segments.getInt(p); @@ -344,7 +347,8 @@ void run() throws IOException { positionFieldWork(shard, segment, firstDoc); LeafReaderContext ctx = ctx(shard, segment); fieldsMoved(ctx, shard); - read(firstDoc); + verifyBuilders(loaderBlockFactory, shard); + read(firstDoc, shard); for (int i = 1; i < forwards.length; i++) { p = forwards[i]; shard = shards.getInt(p); @@ -354,11 +358,19 @@ void run() throws IOException { ctx = ctx(shard, segment); fieldsMoved(ctx, shard); } - read(docs.getInt(p)); + verifyBuilders(loaderBlockFactory, shard); + read(docs.getInt(p), shard); } - for (int f = 0; f < builders.length; f++) { - try (Block orig = builders[f].build()) { - target[f] = orig.filter(backwards); + for (int f = 0; f < target.length; f++) { + for (int s = 0; s < shardContexts.size(); s++) { + if (builders[f][s] != null) { + try (Block orig = (Block) converters[f][s].convert(builders[f][s].build())) { + fieldTypeBuilders[f].copyFrom(orig, 0, orig.getPositionCount()); + } + } + } + try (Block targetBlock = fieldTypeBuilders[f].build()) { + target[f] = targetBlock.filter(backwards); } } } @@ -379,16 +391,29 @@ private void fieldsMoved(LeafReaderContext ctx, int shard) throws IOException { } } - private void read(int doc) throws IOException { + private void verifyBuilders(ComputeBlockLoaderFactory loaderBlockFactory, int shard) { + for (int f = 0; f < fields.length; f++) { + if (builders[f][shard] == null) { + // Note that this relies on field.newShard() to set the loader and converter correctly for the current shard + builders[f][shard] = (Block.Builder) fields[f].loader.builder(loaderBlockFactory, docs.getPositionCount()); + converters[f][shard] = fields[f].loader; + } + } + } + + private void read(int doc, int shard) throws IOException { storedFields.advanceTo(doc); for (int f = 0; f < builders.length; f++) { - rowStride[f].read(doc, storedFields, builders[f]); + rowStride[f].read(doc, storedFields, builders[f][shard]); } } @Override public void close() { - Releasables.closeExpectNoException(builders); + Releasables.closeExpectNoException(fieldTypeBuilders); + for (int f = 0; f < fields.length; f++) { + Releasables.closeExpectNoException(builders[f]); + } } } @@ -468,7 +493,17 @@ private void trackReader(String type, BlockLoader.Reader reader) { } } - private record RowStrideReaderWork(BlockLoader.RowStrideReader reader, Block.Builder builder, int offset) implements Releasable { + private record RowStrideReaderWork(BlockLoader.RowStrideReader reader, Block.Builder builder, BlockLoader loader, int offset) + implements + Releasable { + void read(int doc, BlockLoaderStoredFieldsFromLeafLoader storedFields) throws IOException { + reader.read(doc, storedFields, builder); + } + + Block build() { + return (Block) loader.convert(builder.build()); + } + @Override public void close() { builder.close(); diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValueSourceReaderTypeConversionTests.java 
b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValueSourceReaderTypeConversionTests.java new file mode 100644 index 0000000000000..66bcf2a57e393 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValueSourceReaderTypeConversionTests.java @@ -0,0 +1,2020 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.lucene; + +import org.apache.lucene.document.Document; +import org.apache.lucene.document.DoubleDocValuesField; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.NumericDocValuesField; +import org.apache.lucene.document.SortedDocValuesField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.DocValuesType; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.NoMergePolicy; +import org.apache.lucene.index.SortedSetDocValues; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.Randomness; +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.breaker.NoopCircuitBreaker; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.collect.Iterators; +import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.DoubleVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.data.TestBlockFactory; +import org.elasticsearch.compute.operator.AnyOperatorTestCase; +import org.elasticsearch.compute.operator.CannedSourceOperator; +import org.elasticsearch.compute.operator.Driver; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.DriverRunner; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.compute.operator.Operator; +import org.elasticsearch.compute.operator.PageConsumerOperator; +import org.elasticsearch.compute.operator.SequenceLongBlockSourceOperator; +import org.elasticsearch.compute.operator.SourceOperator; +import org.elasticsearch.compute.operator.TestResultPageSinkOperator; +import org.elasticsearch.core.IOUtils; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.core.TimeValue; +import 
org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.mapper.BlockLoader; +import org.elasticsearch.index.mapper.FieldNamesFieldMapper; +import org.elasticsearch.index.mapper.IdFieldMapper; +import org.elasticsearch.index.mapper.KeywordFieldMapper; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.MapperServiceTestCase; +import org.elasticsearch.index.mapper.ParsedDocument; +import org.elasticsearch.index.mapper.SourceLoader; +import org.elasticsearch.index.mapper.SourceToParse; +import org.elasticsearch.index.mapper.TsidExtractingIdFieldMapper; +import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.fetch.StoredFieldsSpec; +import org.elasticsearch.search.lookup.SearchLookup; +import org.elasticsearch.threadpool.FixedExecutorBuilder; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xcontent.json.JsonXContent; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.core.util.StringUtils; +import org.hamcrest.Matcher; +import org.junit.After; + +import java.io.Closeable; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; +import java.util.TreeSet; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import java.util.stream.LongStream; + +import static org.elasticsearch.test.MapMatcher.assertMap; +import static org.elasticsearch.test.MapMatcher.matchesMap; +import static org.elasticsearch.xpack.esql.core.type.DataType.IP; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.oneOf; +import static org.hamcrest.Matchers.sameInstance; + +/** + * These tests are partial duplicates of the tests in ValuesSourceReaderOperatorTests, and focus on testing the behaviour + * of the ValuesSourceReaderOperator, but with a few key differences: + *
+ * <ul>
+ *     <li>Multiple indexes and index mappings are defined and tested</li>
+ *     <li>
+ *         Most primitive types also include a field with prefix 'str_' which is stored and mapped as a string,
+ *         but expected to be extracted and converted directly to the primitive type.
+ *         For example: "str_long": "1" should be read directly into a field named "str_long" of type "long" and value 1.
+ *         This tests the ability of the BlockLoader.convert(Block) method to convert a string to a primitive type.
+ *     </li>
+ *     <li>
+ *         Each index has a few additional custom fields that are stored as specific types, but should be converted to strings by the
+ *         BlockLoader.convert(Block) method. These fields are:
+ *         <ul>
+ *             <li>ip: stored as an IP type, but should be converted to a string</li>
+ *             <li>duration: stored as a long type, but should be converted to a string</li>
+ *         </ul>
+ *         One index stores them as IP and long types, and the other as keyword types, so we test the behaviour of the
+ *         'union types' capabilities of the ValuesSourceReaderOperator class.
+ *     </li>
+ * </ul>
    + * Since this test does not have access to the type conversion code in the ESQL module, we have mocks for that behaviour + * in the inner classes TestTypeConvertingBlockLoader and TestBlockConverter. + */ +@SuppressWarnings("resource") +public class ValueSourceReaderTypeConversionTests extends AnyOperatorTestCase { + private static final String[] PREFIX = new String[] { "a", "b", "c" }; + private static final Map INDICES = new LinkedHashMap<>(); + static { + addIndex( + Map.of( + "ip", + new TestFieldType<>("ip", IP, d -> "192.169.0." + d % 256, Checks::unionIPsAsStrings), + "duration", + new TestFieldType<>("duration", DataType.LONG, d -> (long) d, Checks::unionDurationsAsStrings) + ) + ); + addIndex( + Map.of( + "ip", + new TestFieldType<>("ip", DataType.KEYWORD, d -> "192.169.0." + d % 256, Checks::unionIPsAsStrings), + "duration", + new TestFieldType<>("duration", DataType.KEYWORD, d -> Integer.toString(d), Checks::unionDurationsAsStrings) + ) + ); + } + + static void addIndex(Map> fieldTypes) { + String indexKey = "index" + (INDICES.size() + 1); + INDICES.put(indexKey, new TestIndexMappingConfig(indexKey, INDICES.size(), fieldTypes)); + } + + private record TestIndexMappingConfig(String indexName, int shardIdx, Map> fieldTypes) {} + + private record TestFieldType(String name, DataType dataType, Function valueGenerator, CheckResults checkResults) {} + + private final Map directories = new HashMap<>(); + private final Map mapperServices = new HashMap<>(); + private final Map readers = new HashMap<>(); + private static final Map> keyToTags = new HashMap<>(); + + @After + public void closeIndex() throws IOException { + IOUtils.close(readers.values()); + IOUtils.close(directories.values()); + } + + private Directory directory(String indexKey) { + return directories.computeIfAbsent(indexKey, k -> newDirectory()); + } + + private MapperService mapperService(String indexKey) { + return mapperServices.get(indexKey); + } + + private List initShardContexts() { + return INDICES.keySet() + .stream() + .map(index -> new ValuesSourceReaderOperator.ShardContext(reader(index), () -> SourceLoader.FROM_STORED_SOURCE)) + .toList(); + } + + private IndexReader reader(String indexKey) { + if (readers.get(indexKey) == null) { + try { + initIndex(indexKey, 100, 10); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + return readers.get(indexKey); + } + + @Override + protected Operator.OperatorFactory simple() { + return factory(initShardContexts(), mapperService("index1").fieldType("long"), ElementType.LONG); + } + + public static Operator.OperatorFactory factory( + List shardContexts, + MappedFieldType ft, + ElementType elementType + ) { + return factory(shardContexts, ft.name(), elementType, ft.blockLoader(null)); + } + + private static Operator.OperatorFactory factory( + List shardContexts, + String name, + ElementType elementType, + BlockLoader loader + ) { + return new ValuesSourceReaderOperator.Factory(List.of(new ValuesSourceReaderOperator.FieldInfo(name, elementType, shardIdx -> { + if (shardIdx < 0 || shardIdx >= INDICES.size()) { + fail("unexpected shardIdx [" + shardIdx + "]"); + } + return loader; + })), shardContexts, 0); + } + + protected SourceOperator simpleInput(DriverContext context, int size) { + return simpleInput(context, size, commitEvery(size), randomPageSize()); + } + + private int commitEvery(int numDocs) { + return Math.max(1, (int) Math.ceil((double) numDocs / 10)); + } + + private SourceOperator simpleInput(DriverContext context, int size, int commitEvery, 
int pageSize) { + List shardContexts = new ArrayList<>(); + try { + for (String indexKey : INDICES.keySet()) { + initIndex(indexKey, size, commitEvery); + shardContexts.add(new LuceneSourceOperatorTests.MockShardContext(reader(indexKey), INDICES.get(indexKey).shardIdx)); + } + } catch (IOException e) { + throw new RuntimeException(e); + } + var luceneFactory = new LuceneSourceOperator.Factory( + shardContexts, + ctx -> new MatchAllDocsQuery(), + DataPartitioning.SHARD, + 1,// randomIntBetween(1, 10), + pageSize, + LuceneOperator.NO_LIMIT + ); + return luceneFactory.get(context); + } + + private void initMapping(String indexKey) throws IOException { + TestIndexMappingConfig indexMappingConfig = INDICES.get(indexKey); + mapperServices.put(indexKey, new MapperServiceTestCase() { + }.createMapperService(MapperServiceTestCase.mapping(b -> { + fieldExamples(b, "key", "integer"); // unique key per-index to use for looking up test values to compare to + fieldExamples(b, "indexKey", "keyword"); // index name (can be used to choose index-specific test values) + fieldExamples(b, "int", "integer"); + fieldExamples(b, "short", "short"); + fieldExamples(b, "byte", "byte"); + fieldExamples(b, "long", "long"); + fieldExamples(b, "double", "double"); + fieldExamples(b, "kwd", "keyword"); + b.startObject("stored_kwd").field("type", "keyword").field("store", true).endObject(); + b.startObject("mv_stored_kwd").field("type", "keyword").field("store", true).endObject(); + + simpleField(b, "missing_text", "text"); + + for (Map.Entry> entry : indexMappingConfig.fieldTypes.entrySet()) { + String fieldName = entry.getKey(); + TestFieldType fieldType = entry.getValue(); + simpleField(b, fieldName, fieldType.dataType.typeName()); + } + }))); + } + + private void initIndex(String indexKey, int size, int commitEvery) throws IOException { + initMapping(indexKey); + readers.put(indexKey, initIndex(indexKey, directory(indexKey), size, commitEvery)); + } + + private IndexReader initIndex(String indexKey, Directory directory, int size, int commitEvery) throws IOException { + keyToTags.computeIfAbsent(indexKey, k -> new HashMap<>()).clear(); + TestIndexMappingConfig indexMappingConfig = INDICES.get(indexKey); + try ( + IndexWriter writer = new IndexWriter( + directory, + newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE).setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH) + ) + ) { + for (int d = 0; d < size; d++) { + XContentBuilder source = JsonXContent.contentBuilder(); + source.startObject(); + source.field("key", d); // documents in this index have a unique key, from which most other values can be derived + source.field("indexKey", indexKey); // all documents in this index have the same indexKey + + source.field("long", d); + source.field("str_long", Long.toString(d)); + source.startArray("mv_long"); + for (int v = 0; v <= d % 3; v++) { + source.value(-1_000L * d + v); + } + source.endArray(); + source.field("source_long", (long) d); + source.startArray("mv_source_long"); + for (int v = 0; v <= d % 3; v++) { + source.value(-1_000L * d + v); + } + source.endArray(); + + source.field("int", d); + source.field("str_int", Integer.toString(d)); + source.startArray("mv_int"); + for (int v = 0; v <= d % 3; v++) { + source.value(1_000 * d + v); + } + source.endArray(); + source.field("source_int", d); + source.startArray("mv_source_int"); + for (int v = 0; v <= d % 3; v++) { + source.value(1_000 * d + v); + } + source.endArray(); + + source.field("short", (short) d); + source.field("str_short", 
Short.toString((short) d)); + source.startArray("mv_short"); + for (int v = 0; v <= d % 3; v++) { + source.value((short) (2_000 * d + v)); + } + source.endArray(); + source.field("source_short", (short) d); + source.startArray("mv_source_short"); + for (int v = 0; v <= d % 3; v++) { + source.value((short) (2_000 * d + v)); + } + source.endArray(); + + source.field("byte", (byte) d); + source.field("str_byte", Byte.toString((byte) d)); + source.startArray("mv_byte"); + for (int v = 0; v <= d % 3; v++) { + source.value((byte) (3_000 * d + v)); + } + source.endArray(); + source.field("source_byte", (byte) d); + source.startArray("mv_source_byte"); + for (int v = 0; v <= d % 3; v++) { + source.value((byte) (3_000 * d + v)); + } + source.endArray(); + + source.field("double", d / 123_456d); + source.field("str_double", Double.toString(d / 123_456d)); + source.startArray("mv_double"); + for (int v = 0; v <= d % 3; v++) { + source.value(d / 123_456d + v); + } + source.endArray(); + source.field("source_double", d / 123_456d); + source.startArray("mv_source_double"); + for (int v = 0; v <= d % 3; v++) { + source.value(d / 123_456d + v); + } + source.endArray(); + + String tag = keyToTags.get(indexKey).computeIfAbsent(d, k -> "tag-" + randomIntBetween(1, 5)); + source.field("kwd", tag); + source.field("str_kwd", tag); + source.startArray("mv_kwd"); + for (int v = 0; v <= d % 3; v++) { + source.value(PREFIX[v] + d); + } + source.endArray(); + source.field("stored_kwd", Integer.toString(d)); + source.startArray("mv_stored_kwd"); + for (int v = 0; v <= d % 3; v++) { + source.value(PREFIX[v] + d); + } + source.endArray(); + source.field("source_kwd", Integer.toString(d)); + source.startArray("mv_source_kwd"); + for (int v = 0; v <= d % 3; v++) { + source.value(PREFIX[v] + d); + } + source.endArray(); + + source.field("text", Integer.toString(d)); + source.startArray("mv_text"); + for (int v = 0; v <= d % 3; v++) { + source.value(PREFIX[v] + d); + } + source.endArray(); + + for (Map.Entry> entry : indexMappingConfig.fieldTypes.entrySet()) { + String fieldName = entry.getKey(); + TestFieldType fieldType = entry.getValue(); + source.field(fieldName, fieldType.valueGenerator.apply(d)); + } + + source.endObject(); + + ParsedDocument doc = mapperService(indexKey).documentParser() + .parseDocument( + new SourceToParse("id" + d, BytesReference.bytes(source), XContentType.JSON), + mapperService(indexKey).mappingLookup() + ); + writer.addDocuments(doc.docs()); + + if (d % commitEvery == commitEvery - 1) { + writer.commit(); + } + } + } + return DirectoryReader.open(directory); + } + + @Override + protected Matcher expectedDescriptionOfSimple() { + return equalTo("ValuesSourceReaderOperator[fields = [long]]"); + } + + @Override + protected Matcher expectedToStringOfSimple() { + return expectedDescriptionOfSimple(); + } + + public void testLoadAll() { + DriverContext driverContext = driverContext(); + loadSimpleAndAssert( + driverContext, + CannedSourceOperator.collectPages(simpleInput(driverContext, between(100, 5000))), + Block.MvOrdering.SORTED_ASCENDING, + Block.MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING + ); + } + + public void testLoadAllInOnePage() { + DriverContext driverContext = driverContext(); + loadSimpleAndAssert( + driverContext, + List.of(CannedSourceOperator.mergePages(CannedSourceOperator.collectPages(simpleInput(driverContext, between(100, 5000))))), + Block.MvOrdering.UNORDERED, + Block.MvOrdering.UNORDERED + ); + } + + public void testManySingleDocPages() { + String indexKey = "index1"; + 
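+ // Note: this test currently reads from index1 only; see the "TODO: Add index2" below.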
DriverContext driverContext = driverContext(); + int numDocs = between(10, 100); + List input = CannedSourceOperator.collectPages(simpleInput(driverContext, numDocs, between(1, numDocs), 1)); + Randomness.shuffle(input); + List shardContexts = initShardContexts(); + List operators = new ArrayList<>(); + Checks checks = new Checks(Block.MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING, Block.MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING); + FieldCase testCase = new FieldCase( + new KeywordFieldMapper.KeywordFieldType("kwd"), + ElementType.BYTES_REF, + checks::tags, + StatusChecks::keywordsFromDocValues + ); + // TODO: Add index2 + operators.add( + new ValuesSourceReaderOperator.Factory( + List.of(testCase.info, fieldInfo(mapperService(indexKey).fieldType("key"), ElementType.INT)), + shardContexts, + 0 + ).get(driverContext) + ); + List results = drive(operators, input.iterator(), driverContext); + assertThat(results, hasSize(input.size())); + for (Page page : results) { + assertThat(page.getBlockCount(), equalTo(3)); + IntVector keys = page.getBlock(2).asVector(); + for (int p = 0; p < page.getPositionCount(); p++) { + int key = keys.getInt(p); + testCase.checkResults.check(page.getBlock(1), p, key, indexKey); + } + } + } + + public void testEmpty() { + DriverContext driverContext = driverContext(); + loadSimpleAndAssert( + driverContext, + CannedSourceOperator.collectPages(simpleInput(driverContext, 0)), + Block.MvOrdering.UNORDERED, + Block.MvOrdering.UNORDERED + ); + } + + public void testLoadAllInOnePageShuffled() { + DriverContext driverContext = driverContext(); + Page source = CannedSourceOperator.mergePages(CannedSourceOperator.collectPages(simpleInput(driverContext, between(100, 5000)))); + List shuffleList = new ArrayList<>(); + IntStream.range(0, source.getPositionCount()).forEach(shuffleList::add); + Randomness.shuffle(shuffleList); + int[] shuffleArray = shuffleList.stream().mapToInt(Integer::intValue).toArray(); + Block[] shuffledBlocks = new Block[source.getBlockCount()]; + for (int b = 0; b < shuffledBlocks.length; b++) { + shuffledBlocks[b] = source.getBlock(b).filter(shuffleArray); + } + source = new Page(shuffledBlocks); + loadSimpleAndAssert(driverContext, List.of(source), Block.MvOrdering.UNORDERED, Block.MvOrdering.UNORDERED); + } + + private static ValuesSourceReaderOperator.FieldInfo fieldInfo(MappedFieldType ft, ElementType elementType) { + return new ValuesSourceReaderOperator.FieldInfo(ft.name(), elementType, shardIdx -> getBlockLoaderFor(shardIdx, ft, null)); + } + + private static ValuesSourceReaderOperator.FieldInfo fieldInfo(MappedFieldType ft, MappedFieldType ftX, ElementType elementType) { + return new ValuesSourceReaderOperator.FieldInfo(ft.name(), elementType, shardIdx -> getBlockLoaderFor(shardIdx, ft, ftX)); + } + + private ValuesSourceReaderOperator.FieldInfo fieldInfo(String fieldName, ElementType elementType, DataType toType) { + return new ValuesSourceReaderOperator.FieldInfo(fieldName, elementType, shardIdx -> getBlockLoaderFor(shardIdx, fieldName, toType)); + } + + private static MappedFieldType.BlockLoaderContext blContext() { + return new MappedFieldType.BlockLoaderContext() { + @Override + public String indexName() { + return "test_index"; + } + + @Override + public MappedFieldType.FieldExtractPreference fieldExtractPreference() { + return MappedFieldType.FieldExtractPreference.NONE; + } + + @Override + public SearchLookup lookup() { + throw new UnsupportedOperationException(); + } + + @Override + public Set sourcePaths(String name) { + return 
Set.of(name); + } + + @Override + public String parentField(String field) { + return null; + } + + @Override + public FieldNamesFieldMapper.FieldNamesFieldType fieldNames() { + return FieldNamesFieldMapper.FieldNamesFieldType.get(true); + } + }; + } + + private void loadSimpleAndAssert( + DriverContext driverContext, + List input, + Block.MvOrdering booleanAndNumericalDocValuesMvOrdering, + Block.MvOrdering bytesRefDocValuesMvOrdering + ) { + List cases = infoAndChecksForEachType(booleanAndNumericalDocValuesMvOrdering, bytesRefDocValuesMvOrdering); + List shardContexts = initShardContexts(); + List operators = new ArrayList<>(); + operators.add( + new ValuesSourceReaderOperator.Factory( + List.of( + fieldInfo(mapperService("index1").fieldType("key"), ElementType.INT), + fieldInfo(mapperService("index1").fieldType("indexKey"), ElementType.BYTES_REF) + ), + shardContexts, + 0 + ).get(driverContext) + ); + List tests = new ArrayList<>(); + while (cases.isEmpty() == false) { + List b = randomNonEmptySubsetOf(cases); + cases.removeAll(b); + tests.addAll(b); + operators.add( + new ValuesSourceReaderOperator.Factory(b.stream().map(i -> i.info).toList(), shardContexts, 0).get(driverContext) + ); + } + List results = drive(operators, input.iterator(), driverContext); + assertThat(results, hasSize(input.size())); + for (Page page : results) { + assertThat(page.getBlockCount(), equalTo(tests.size() + 3 /* one for doc, one for keys and one for indexKey */)); + IntVector keys = page.getBlock(1).asVector(); + BytesRefVector indexKeys = page.getBlock(2).asVector(); + for (int p = 0; p < page.getPositionCount(); p++) { + int key = keys.getInt(p); + String indexKey = indexKeys.getBytesRef(p, new BytesRef()).utf8ToString(); + for (int i = 0; i < tests.size(); i++) { + try { + tests.get(i).checkResults.check(page.getBlock(3 + i), p, key, indexKey); + } catch (AssertionError e) { + throw new AssertionError("error checking " + tests.get(i).info.name() + "[" + p + "]: " + e.getMessage(), e); + } + } + } + } + for (Operator op : operators) { + assertThat(((ValuesSourceReaderOperator) op).status().pagesProcessed(), equalTo(input.size())); + } + assertDriverContext(driverContext); + } + + interface CheckResults { + void check(Block block, int position, int key, String indexKey); + } + + interface CheckReaders { + void check(boolean forcedRowByRow, int pageCount, int segmentCount, Map readersBuilt); + } + + interface CheckReadersWithName { + void check(String name, boolean forcedRowByRow, int pageCount, int segmentCount, Map readersBuilt); + } + + record FieldCase(ValuesSourceReaderOperator.FieldInfo info, CheckResults checkResults, CheckReadersWithName checkReaders) { + FieldCase(MappedFieldType ft, ElementType elementType, CheckResults checkResults, CheckReadersWithName checkReaders) { + this(fieldInfo(ft, elementType), checkResults, checkReaders); + } + + FieldCase( + MappedFieldType ft, + MappedFieldType ftX, + ElementType elementType, + CheckResults checkResults, + CheckReadersWithName checkReaders + ) { + this(fieldInfo(ft, ftX, elementType), checkResults, checkReaders); + } + + FieldCase(MappedFieldType ft, ElementType elementType, CheckResults checkResults, CheckReaders checkReaders) { + this( + ft, + elementType, + checkResults, + (name, forcedRowByRow, pageCount, segmentCount, readersBuilt) -> checkReaders.check( + forcedRowByRow, + pageCount, + segmentCount, + readersBuilt + ) + ); + } + } + + /** + * Asserts that {@link ValuesSourceReaderOperator#status} claims that only + * the expected readers are 
built after loading singleton pages. + */ + public void testLoadAllStatus() { + testLoadAllStatus(false); + } + + /** + * Asserts that {@link ValuesSourceReaderOperator#status} claims that only + * the expected readers are built after loading non-singleton pages. + */ + public void testLoadAllStatusAllInOnePage() { + testLoadAllStatus(true); + } + + private void testLoadAllStatus(boolean allInOnePage) { + DriverContext driverContext = driverContext(); + int numDocs = between(100, 5000); + List input = CannedSourceOperator.collectPages(simpleInput(driverContext, numDocs, commitEvery(numDocs), numDocs)); + assertThat(input, hasSize(20)); + List shardContexts = initShardContexts(); + int totalSize = 0; + for (var shardContext : shardContexts) { + assertThat(shardContext.reader().leaves(), hasSize(10)); + totalSize += shardContext.reader().leaves().size(); + } + // Build one operator for each field, so we get a unique map to assert on + List cases = infoAndChecksForEachType( + Block.MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING, + Block.MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING + ); + List operators = cases.stream() + .map(i -> new ValuesSourceReaderOperator.Factory(List.of(i.info), shardContexts, 0).get(driverContext)) + .toList(); + if (allInOnePage) { + input = List.of(CannedSourceOperator.mergePages(input)); + } + drive(operators, input.iterator(), driverContext); + for (int i = 0; i < cases.size(); i++) { + ValuesSourceReaderOperator.Status status = (ValuesSourceReaderOperator.Status) operators.get(i).status(); + assertThat(status.pagesProcessed(), equalTo(input.size())); + FieldCase fc = cases.get(i); + fc.checkReaders.check(fc.info.name(), allInOnePage, input.size(), totalSize, status.readersBuilt()); + } + } + + private List infoAndChecksForEachType( + Block.MvOrdering booleanAndNumericalDocValuesMvOrdering, + Block.MvOrdering bytesRefDocValuesMvOrdering + ) { + MapperService mapperService = mapperService("index1"); // almost fields have identical mapper service + Checks checks = new Checks(booleanAndNumericalDocValuesMvOrdering, bytesRefDocValuesMvOrdering); + List r = new ArrayList<>(); + r.add(new FieldCase(mapperService.fieldType(IdFieldMapper.NAME), ElementType.BYTES_REF, checks::ids, StatusChecks::id)); + r.add(new FieldCase(TsidExtractingIdFieldMapper.INSTANCE.fieldType(), ElementType.BYTES_REF, checks::ids, StatusChecks::id)); + r.add(new FieldCase(mapperService.fieldType("long"), ElementType.LONG, checks::longs, StatusChecks::longsFromDocValues)); + r.add( + new FieldCase( + mapperService.fieldType("str_long"), + mapperService.fieldType("long"), + ElementType.LONG, + checks::longs, + StatusChecks::strFromDocValues + ) + ); + r.add( + new FieldCase( + mapperService.fieldType("mv_long"), + ElementType.LONG, + checks::mvLongsFromDocValues, + StatusChecks::mvLongsFromDocValues + ) + ); + r.add(new FieldCase(mapperService.fieldType("missing_long"), ElementType.LONG, checks::constantNulls, StatusChecks::constantNulls)); + r.add(new FieldCase(mapperService.fieldType("source_long"), ElementType.LONG, checks::longs, StatusChecks::longsFromSource)); + r.add( + new FieldCase( + mapperService.fieldType("mv_source_long"), + ElementType.LONG, + checks::mvLongsUnordered, + StatusChecks::mvLongsFromSource + ) + ); + r.add(new FieldCase(mapperService.fieldType("int"), ElementType.INT, checks::ints, StatusChecks::intsFromDocValues)); + r.add( + new FieldCase( + mapperService.fieldType("str_int"), + mapperService.fieldType("int"), + ElementType.INT, + checks::ints, + 
StatusChecks::strFromDocValues + ) + ); + r.add( + new FieldCase( + mapperService.fieldType("mv_int"), + ElementType.INT, + checks::mvIntsFromDocValues, + StatusChecks::mvIntsFromDocValues + ) + ); + r.add(new FieldCase(mapperService.fieldType("missing_int"), ElementType.INT, checks::constantNulls, StatusChecks::constantNulls)); + r.add(new FieldCase(mapperService.fieldType("source_int"), ElementType.INT, checks::ints, StatusChecks::intsFromSource)); + r.add( + new FieldCase( + mapperService.fieldType("mv_source_int"), + ElementType.INT, + checks::mvIntsUnordered, + StatusChecks::mvIntsFromSource + ) + ); + r.add(new FieldCase(mapperService.fieldType("short"), ElementType.INT, checks::shorts, StatusChecks::shortsFromDocValues)); + r.add( + new FieldCase( + mapperService.fieldType("str_short"), + mapperService.fieldType("short"), + ElementType.INT, + checks::shorts, + StatusChecks::strFromDocValues + ) + ); + r.add(new FieldCase(mapperService.fieldType("mv_short"), ElementType.INT, checks::mvShorts, StatusChecks::mvShortsFromDocValues)); + r.add(new FieldCase(mapperService.fieldType("missing_short"), ElementType.INT, checks::constantNulls, StatusChecks::constantNulls)); + r.add(new FieldCase(mapperService.fieldType("byte"), ElementType.INT, checks::bytes, StatusChecks::bytesFromDocValues)); + // r.add(new FieldCase(mapperService.fieldType("str_byte"), ElementType.INT, checks::bytes, StatusChecks::bytesFromDocValues)); + r.add(new FieldCase(mapperService.fieldType("mv_byte"), ElementType.INT, checks::mvBytes, StatusChecks::mvBytesFromDocValues)); + r.add(new FieldCase(mapperService.fieldType("missing_byte"), ElementType.INT, checks::constantNulls, StatusChecks::constantNulls)); + r.add(new FieldCase(mapperService.fieldType("double"), ElementType.DOUBLE, checks::doubles, StatusChecks::doublesFromDocValues)); + r.add( + new FieldCase( + mapperService.fieldType("str_double"), + mapperService.fieldType("double"), + ElementType.DOUBLE, + checks::doubles, + StatusChecks::strFromDocValues + ) + ); + r.add( + new FieldCase(mapperService.fieldType("mv_double"), ElementType.DOUBLE, checks::mvDoubles, StatusChecks::mvDoublesFromDocValues) + ); + r.add( + new FieldCase(mapperService.fieldType("missing_double"), ElementType.DOUBLE, checks::constantNulls, StatusChecks::constantNulls) + ); + r.add(new FieldCase(mapperService.fieldType("kwd"), ElementType.BYTES_REF, checks::tags, StatusChecks::keywordsFromDocValues)); + r.add( + new FieldCase( + mapperService.fieldType("mv_kwd"), + ElementType.BYTES_REF, + checks::mvStringsFromDocValues, + StatusChecks::mvKeywordsFromDocValues + ) + ); + r.add( + new FieldCase(mapperService.fieldType("missing_kwd"), ElementType.BYTES_REF, checks::constantNulls, StatusChecks::constantNulls) + ); + r.add(new FieldCase(storedKeywordField("stored_kwd"), ElementType.BYTES_REF, checks::strings, StatusChecks::keywordsFromStored)); + r.add( + new FieldCase( + storedKeywordField("mv_stored_kwd"), + ElementType.BYTES_REF, + checks::mvStringsUnordered, + StatusChecks::mvKeywordsFromStored + ) + ); + r.add( + new FieldCase(mapperService.fieldType("source_kwd"), ElementType.BYTES_REF, checks::strings, StatusChecks::keywordsFromSource) + ); + r.add( + new FieldCase( + mapperService.fieldType("mv_source_kwd"), + ElementType.BYTES_REF, + checks::mvStringsUnordered, + StatusChecks::mvKeywordsFromSource + ) + ); + r.add( + new FieldCase( + new ValuesSourceReaderOperator.FieldInfo( + "constant_bytes", + ElementType.BYTES_REF, + shardIdx -> BlockLoader.constantBytes(new BytesRef("foo")) + ), + 
checks::constantBytes, + StatusChecks::constantBytes + ) + ); + r.add( + new FieldCase( + new ValuesSourceReaderOperator.FieldInfo("null", ElementType.NULL, shardIdx -> BlockLoader.CONSTANT_NULLS), + checks::constantNulls, + StatusChecks::constantNulls + ) + ); + + // We only care about the field name at this point, so we can use any index mapper here + TestIndexMappingConfig indexMappingConfig = INDICES.get("index1"); + for (TestFieldType fieldType : indexMappingConfig.fieldTypes.values()) { + r.add( + new FieldCase( + fieldInfo(fieldType.name, ElementType.BYTES_REF, DataType.KEYWORD), + fieldType.checkResults, + StatusChecks::unionFromDocValues + ) + ); + } + Collections.shuffle(r, random()); + return r; + } + + record Checks(Block.MvOrdering booleanAndNumericalDocValuesMvOrdering, Block.MvOrdering bytesRefDocValuesMvOrdering) { + void longs(Block block, int position, int key, String indexKey) { + LongVector longs = ((LongBlock) block).asVector(); + assertThat(longs.getLong(position), equalTo((long) key)); + } + + void ints(Block block, int position, int key, String indexKey) { + IntVector ints = ((IntBlock) block).asVector(); + assertThat(ints.getInt(position), equalTo(key)); + } + + void shorts(Block block, int position, int key, String indexKey) { + IntVector ints = ((IntBlock) block).asVector(); + assertThat(ints.getInt(position), equalTo((int) (short) key)); + } + + void bytes(Block block, int position, int key, String indexKey) { + IntVector ints = ((IntBlock) block).asVector(); + assertThat(ints.getInt(position), equalTo((int) (byte) key)); + } + + void doubles(Block block, int position, int key, String indexKey) { + DoubleVector doubles = ((DoubleBlock) block).asVector(); + assertThat(doubles.getDouble(position), equalTo(key / 123_456d)); + } + + void strings(Block block, int position, int key, String indexKey) { + BytesRefVector keywords = ((BytesRefBlock) block).asVector(); + assertThat(keywords.getBytesRef(position, new BytesRef()).utf8ToString(), equalTo(Integer.toString(key))); + } + + static void unionIPsAsStrings(Block block, int position, int key, String indexKey) { + BytesRefVector keywords = ((BytesRefBlock) block).asVector(); + BytesRef bytesRef = keywords.getBytesRef(position, new BytesRef()); + TestIndexMappingConfig mappingConfig = INDICES.get(indexKey); + TestFieldType fieldType = mappingConfig.fieldTypes.get("ip"); + String expected = fieldType.valueGenerator.apply(key).toString(); + // Conversion should already be done in FieldInfo! + // BytesRef found = (fieldType.dataType.typeName().equals("ip")) ? 
new BytesRef(DocValueFormat.IP.format(bytesRef)) : bytesRef; + assertThat(bytesRef.utf8ToString(), equalTo(expected)); + } + + static void unionDurationsAsStrings(Block block, int position, int key, String indexKey) { + BytesRefVector keywords = ((BytesRefBlock) block).asVector(); + BytesRef bytesRef = keywords.getBytesRef(position, new BytesRef()); + TestIndexMappingConfig mappingConfig = INDICES.get(indexKey); + TestFieldType fieldType = mappingConfig.fieldTypes.get("duration"); + String expected = fieldType.valueGenerator.apply(key).toString(); + assertThat(bytesRef.utf8ToString(), equalTo(expected)); + } + + void tags(Block block, int position, int key, String indexKey) { + BytesRefVector keywords = ((BytesRefBlock) block).asVector(); + Object[] validTags = INDICES.keySet().stream().map(keyToTags::get).map(t -> t.get(key)).toArray(); + assertThat(keywords.getBytesRef(position, new BytesRef()).utf8ToString(), oneOf(validTags)); + } + + void ids(Block block, int position, int key, String indexKey) { + BytesRefVector ids = ((BytesRefBlock) block).asVector(); + assertThat(ids.getBytesRef(position, new BytesRef()).utf8ToString(), equalTo("id" + key)); + } + + void constantBytes(Block block, int position, int key, String indexKey) { + BytesRefVector keywords = ((BytesRefBlock) block).asVector(); + assertThat(keywords.getBytesRef(position, new BytesRef()).utf8ToString(), equalTo("foo")); + } + + void constantNulls(Block block, int position, int key, String indexKey) { + assertTrue(block.areAllValuesNull()); + assertTrue(block.isNull(position)); + } + + void mvLongsFromDocValues(Block block, int position, int key, String indexKey) { + mvLongs(block, position, key, booleanAndNumericalDocValuesMvOrdering); + } + + void mvLongsUnordered(Block block, int position, int key, String indexKey) { + mvLongs(block, position, key, Block.MvOrdering.UNORDERED); + } + + private void mvLongs(Block block, int position, int key, Block.MvOrdering expectedMv) { + LongBlock longs = (LongBlock) block; + assertThat(longs.getValueCount(position), equalTo(key % 3 + 1)); + int offset = longs.getFirstValueIndex(position); + for (int v = 0; v <= key % 3; v++) { + assertThat(longs.getLong(offset + v), equalTo(-1_000L * key + v)); + } + if (key % 3 > 0) { + assertThat(longs.mvOrdering(), equalTo(expectedMv)); + } + } + + void mvIntsFromDocValues(Block block, int position, int key, String indexKey) { + mvInts(block, position, key, booleanAndNumericalDocValuesMvOrdering); + } + + void mvIntsUnordered(Block block, int position, int key, String indexKey) { + mvInts(block, position, key, Block.MvOrdering.UNORDERED); + } + + private void mvInts(Block block, int position, int key, Block.MvOrdering expectedMv) { + IntBlock ints = (IntBlock) block; + assertThat(ints.getValueCount(position), equalTo(key % 3 + 1)); + int offset = ints.getFirstValueIndex(position); + for (int v = 0; v <= key % 3; v++) { + assertThat(ints.getInt(offset + v), equalTo(1_000 * key + v)); + } + if (key % 3 > 0) { + assertThat(ints.mvOrdering(), equalTo(expectedMv)); + } + } + + void mvShorts(Block block, int position, int key, String indexKey) { + IntBlock ints = (IntBlock) block; + assertThat(ints.getValueCount(position), equalTo(key % 3 + 1)); + int offset = ints.getFirstValueIndex(position); + for (int v = 0; v <= key % 3; v++) { + assertThat(ints.getInt(offset + v), equalTo((int) (short) (2_000 * key + v))); + } + if (key % 3 > 0) { + assertThat(ints.mvOrdering(), equalTo(booleanAndNumericalDocValuesMvOrdering)); + } + } + + void mvBytes(Block block, 
int position, int key, String indexKey) { + IntBlock ints = (IntBlock) block; + assertThat(ints.getValueCount(position), equalTo(key % 3 + 1)); + int offset = ints.getFirstValueIndex(position); + for (int v = 0; v <= key % 3; v++) { + assertThat(ints.getInt(offset + v), equalTo((int) (byte) (3_000 * key + v))); + } + if (key % 3 > 0) { + assertThat(ints.mvOrdering(), equalTo(booleanAndNumericalDocValuesMvOrdering)); + } + } + + void mvDoubles(Block block, int position, int key, String indexKey) { + DoubleBlock doubles = (DoubleBlock) block; + int offset = doubles.getFirstValueIndex(position); + for (int v = 0; v <= key % 3; v++) { + assertThat(doubles.getDouble(offset + v), equalTo(key / 123_456d + v)); + } + if (key % 3 > 0) { + assertThat(doubles.mvOrdering(), equalTo(booleanAndNumericalDocValuesMvOrdering)); + } + } + + void mvStringsFromDocValues(Block block, int position, int key, String indexKey) { + mvStrings(block, position, key, bytesRefDocValuesMvOrdering); + } + + void mvStringsUnordered(Block block, int position, int key, String indexKey) { + mvStrings(block, position, key, Block.MvOrdering.UNORDERED); + } + + void mvStrings(Block block, int position, int key, Block.MvOrdering expectedMv) { + BytesRefBlock text = (BytesRefBlock) block; + assertThat(text.getValueCount(position), equalTo(key % 3 + 1)); + int offset = text.getFirstValueIndex(position); + for (int v = 0; v <= key % 3; v++) { + assertThat(text.getBytesRef(offset + v, new BytesRef()).utf8ToString(), equalTo(PREFIX[v] + key)); + } + if (key % 3 > 0) { + assertThat(text.mvOrdering(), equalTo(expectedMv)); + } + } + } + + static class StatusChecks { + + static void strFromDocValues(String name, boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + docValues(name, "Ordinals", forcedRowByRow, pageCount, segmentCount, readers); + } + + static void longsFromDocValues(boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + docValues("long", "Longs", forcedRowByRow, pageCount, segmentCount, readers); + } + + static void longsFromSource(boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + source("source_long", "Longs", forcedRowByRow, pageCount, segmentCount, readers); + } + + static void intsFromDocValues(boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + docValues("int", "Ints", forcedRowByRow, pageCount, segmentCount, readers); + } + + static void intsFromSource(boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + source("source_int", "Ints", forcedRowByRow, pageCount, segmentCount, readers); + } + + static void shortsFromDocValues(boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + docValues("short", "Ints", forcedRowByRow, pageCount, segmentCount, readers); + } + + static void bytesFromDocValues(boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + docValues("byte", "Ints", forcedRowByRow, pageCount, segmentCount, readers); + } + + static void doublesFromDocValues(boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + docValues("double", "Doubles", forcedRowByRow, pageCount, segmentCount, readers); + } + + static void keywordsFromDocValues(boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + docValues("kwd", "Ordinals", forcedRowByRow, pageCount, segmentCount, readers); + } + + static void keywordsFromStored(boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + stored("stored_kwd", "Bytes", forcedRowByRow, pageCount, 
segmentCount, readers); + } + + static void keywordsFromSource(boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + source("source_kwd", "Bytes", forcedRowByRow, pageCount, segmentCount, readers); + } + + static void mvLongsFromDocValues(boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + mvDocValues("mv_long", "Longs", forcedRowByRow, pageCount, segmentCount, readers); + } + + static void mvLongsFromSource(boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + source("mv_source_long", "Longs", forcedRowByRow, pageCount, segmentCount, readers); + } + + static void mvIntsFromDocValues(boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + mvDocValues("mv_int", "Ints", forcedRowByRow, pageCount, segmentCount, readers); + } + + static void mvIntsFromSource(boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + source("mv_source_int", "Ints", forcedRowByRow, pageCount, segmentCount, readers); + } + + static void mvShortsFromDocValues(boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + mvDocValues("mv_short", "Ints", forcedRowByRow, pageCount, segmentCount, readers); + } + + static void mvBytesFromDocValues(boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + mvDocValues("mv_byte", "Ints", forcedRowByRow, pageCount, segmentCount, readers); + } + + static void mvDoublesFromDocValues(boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + mvDocValues("mv_double", "Doubles", forcedRowByRow, pageCount, segmentCount, readers); + } + + static void mvKeywordsFromDocValues(boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + mvDocValues("mv_kwd", "Ordinals", forcedRowByRow, pageCount, segmentCount, readers); + } + + static void mvKeywordsFromStored(boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + stored("mv_stored_kwd", "Bytes", forcedRowByRow, pageCount, segmentCount, readers); + } + + static void mvKeywordsFromSource(boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + source("mv_source_kwd", "Bytes", forcedRowByRow, pageCount, segmentCount, readers); + } + + static void unionFromDocValues(String name, boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + // TODO: develop a working check for this + // docValues(name, "Ordinals", forcedRowByRow, pageCount, segmentCount, readers); + } + + private static void docValues( + String name, + String type, + boolean forcedRowByRow, + int pageCount, + int segmentCount, + Map readers + ) { + if (forcedRowByRow) { + assertMap( + "Expected segment count in " + readers + "\n", + readers, + matchesMap().entry(name + ":row_stride:BlockDocValuesReader.Singleton" + type, lessThanOrEqualTo(segmentCount)) + ); + } else { + assertMap( + "Expected segment count in " + readers + "\n", + readers, + matchesMap().entry(name + ":column_at_a_time:BlockDocValuesReader.Singleton" + type, lessThanOrEqualTo(pageCount)) + ); + } + } + + private static void mvDocValues( + String name, + String type, + boolean forcedRowByRow, + int pageCount, + int segmentCount, + Map readers + ) { + if (forcedRowByRow) { + Integer singletons = (Integer) readers.remove(name + ":row_stride:BlockDocValuesReader.Singleton" + type); + if (singletons != null) { + segmentCount -= singletons; + } + assertMap(readers, matchesMap().entry(name + ":row_stride:BlockDocValuesReader." 
+                assertMap(readers, matchesMap().entry(name + ":row_stride:BlockDocValuesReader." + type, segmentCount));
+            } else {
+                Integer singletons = (Integer) readers.remove(name + ":column_at_a_time:BlockDocValuesReader.Singleton" + type);
+                if (singletons != null) {
+                    pageCount -= singletons;
+                }
+                assertMap(
+                    readers,
+                    matchesMap().entry(name + ":column_at_a_time:BlockDocValuesReader." + type, lessThanOrEqualTo(pageCount))
+                );
+            }
+        }
+
+        static void id(boolean forcedRowByRow, int pageCount, int segmentCount, Map<?, ?> readers) {
+            stored("_id", "Id", forcedRowByRow, pageCount, segmentCount, readers);
+        }
+
+        private static void source(String name, String type, boolean forcedRowByRow, int pageCount, int segmentCount, Map<?, ?> readers) {
+            Matcher<Integer> count;
+            if (forcedRowByRow) {
+                count = equalTo(segmentCount);
+            } else {
+                count = lessThanOrEqualTo(pageCount);
+                Integer columnAttempts = (Integer) readers.remove(name + ":column_at_a_time:null");
+                assertThat(columnAttempts, not(nullValue()));
+            }
+
+            Integer sequentialCount = (Integer) readers.remove("stored_fields[requires_source:true, fields:0, sequential: true]");
+            Integer nonSequentialCount = (Integer) readers.remove("stored_fields[requires_source:true, fields:0, sequential: false]");
+            int totalReaders = (sequentialCount == null ? 0 : sequentialCount) + (nonSequentialCount == null ? 0 : nonSequentialCount);
+            assertThat(totalReaders, count);
+
+            assertMap(readers, matchesMap().entry(name + ":row_stride:BlockSourceReader." + type, count));
+        }
+
+        private static void stored(String name, String type, boolean forcedRowByRow, int pageCount, int segmentCount, Map<?, ?> readers) {
+            Matcher<Integer> count;
+            if (forcedRowByRow) {
+                count = equalTo(segmentCount);
+            } else {
+                count = lessThanOrEqualTo(pageCount);
+                Integer columnAttempts = (Integer) readers.remove(name + ":column_at_a_time:null");
+                assertThat(columnAttempts, not(nullValue()));
+            }
+
+            Integer sequentialCount = (Integer) readers.remove("stored_fields[requires_source:false, fields:1, sequential: true]");
+            Integer nonSequentialCount = (Integer) readers.remove("stored_fields[requires_source:false, fields:1, sequential: false]");
+            int totalReaders = (sequentialCount == null ? 0 : sequentialCount) + (nonSequentialCount == null ? 0 : nonSequentialCount);
+            assertThat(totalReaders, count);
+
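+            // after removing the stored-fields entries above, only the row-stride reader entry for this field should remain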
+            assertMap(readers, matchesMap().entry(name + ":row_stride:BlockStoredFieldsReader." + type, count));
+        }
+
+        static void constantBytes(String name, boolean forcedRowByRow, int pageCount, int segmentCount, Map<?, ?> readers) {
+            if (forcedRowByRow) {
+                assertMap(readers, matchesMap().entry(name + ":row_stride:constant[[66 6f 6f]]", segmentCount));
+            } else {
+                assertMap(readers, matchesMap().entry(name + ":column_at_a_time:constant[[66 6f 6f]]", lessThanOrEqualTo(pageCount)));
+            }
+        }
+
+        static void constantNulls(String name, boolean forcedRowByRow, int pageCount, int segmentCount, Map<?, ?> readers) {
+            if (forcedRowByRow) {
+                assertMap(readers, matchesMap().entry(name + ":row_stride:constant_nulls", segmentCount));
+            } else {
+                assertMap(readers, matchesMap().entry(name + ":column_at_a_time:constant_nulls", lessThanOrEqualTo(pageCount)));
+            }
+        }
+    }
+
+    public void testWithNulls() throws IOException {
+        String indexKey = "index1";
+        mapperServices.put(indexKey, new MapperServiceTestCase() {
+        }.createMapperService(MapperServiceTestCase.mapping(b -> {
+            fieldExamples(b, "i", "integer");
+            fieldExamples(b, "j", "long");
+            fieldExamples(b, "d", "double");
+        })));
+        MappedFieldType intFt = mapperService(indexKey).fieldType("i");
+        MappedFieldType longFt = mapperService(indexKey).fieldType("j");
+        MappedFieldType doubleFt = mapperService(indexKey).fieldType("d");
+        MappedFieldType kwFt = new KeywordFieldMapper.KeywordFieldType("kw");
+
+        NumericDocValuesField intField = new NumericDocValuesField(intFt.name(), 0);
+        NumericDocValuesField longField = new NumericDocValuesField(longFt.name(), 0);
+        NumericDocValuesField doubleField = new DoubleDocValuesField(doubleFt.name(), 0);
+        final int numDocs = between(100, 5000);
+        try (RandomIndexWriter w = new RandomIndexWriter(random(), directory(indexKey))) {
+            Document doc = new Document();
+            for (int i = 0; i < numDocs; i++) {
+                doc.clear();
+                intField.setLongValue(i);
+                doc.add(intField);
+                if (i % 100 != 0) { // skip the remaining fields on every 100th document
+                    longField.setLongValue(i);
+                    doc.add(longField);
+                    doubleField.setDoubleValue(i);
+                    doc.add(doubleField);
+                    doc.add(new SortedDocValuesField(kwFt.name(), new BytesRef("kw=" + i)));
+                }
+                w.addDocument(doc);
+            }
+            w.commit();
+            readers.put(indexKey, w.getReader());
+        }
+        LuceneSourceOperatorTests.MockShardContext shardContext = new LuceneSourceOperatorTests.MockShardContext(reader(indexKey), 0);
+        DriverContext driverContext = driverContext();
+        var luceneFactory = new LuceneSourceOperator.Factory(
+            List.of(shardContext),
+            ctx -> new MatchAllDocsQuery(),
+            randomFrom(DataPartitioning.values()),
+            randomIntBetween(1, 10),
+            randomPageSize(),
+            LuceneOperator.NO_LIMIT
+        );
+        var vsShardContext = new ValuesSourceReaderOperator.ShardContext(reader(indexKey), () -> SourceLoader.FROM_STORED_SOURCE);
+        try (
+            Driver driver = new Driver(
+                driverContext,
+                luceneFactory.get(driverContext),
+                List.of(
+                    factory(List.of(vsShardContext), intFt, ElementType.INT).get(driverContext),
+                    factory(List.of(vsShardContext), longFt, ElementType.LONG).get(driverContext),
+                    factory(List.of(vsShardContext), doubleFt, ElementType.DOUBLE).get(driverContext),
+                    factory(List.of(vsShardContext), kwFt, ElementType.BYTES_REF).get(driverContext)
+                ),
+                new PageConsumerOperator(page -> {
+                    try {
+                        logger.debug("New page: {}", page);
+                        IntBlock intValuesBlock = page.getBlock(1);
+                        LongBlock longValuesBlock = page.getBlock(2);
+                        DoubleBlock doubleValuesBlock = page.getBlock(3);
+                        BytesRefBlock keywordValuesBlock = page.getBlock(4);
+
+                        for (int i = 0; i < page.getPositionCount(); i++) {
+                            assertFalse(intValuesBlock.isNull(i));
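+                            // the int field was added to every document, so it can never be null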
+                            long j = intValuesBlock.getInt(i);
+                            // every 100th document has the remaining fields unset
+                            boolean fieldIsEmpty = j % 100 == 0;
+                            assertEquals(fieldIsEmpty, longValuesBlock.isNull(i));
+                            assertEquals(fieldIsEmpty, doubleValuesBlock.isNull(i));
+                            assertEquals(fieldIsEmpty, keywordValuesBlock.isNull(i));
+                        }
+                    } finally {
+                        page.releaseBlocks();
+                    }
+                }),
+                () -> {}
+            )
+        ) {
+            runDriver(driver);
+        }
+        assertDriverContext(driverContext);
+    }
+
+    private XContentBuilder fieldExamples(XContentBuilder builder, String name, String type) throws IOException {
+        simpleField(builder, name, type);
+        simpleField(builder, "str_" + name, "keyword");
+        simpleField(builder, "mv_" + name, type);
+        simpleField(builder, "missing_" + name, type);
+        sourceField(builder, "source_" + name, type);
+        return sourceField(builder, "mv_source_" + name, type);
+    }
+
+    private XContentBuilder simpleField(XContentBuilder builder, String name, String type) throws IOException {
+        return builder.startObject(name).field("type", type).endObject();
+    }
+
+    private XContentBuilder sourceField(XContentBuilder builder, String name, String type) throws IOException {
+        return builder.startObject(name).field("type", type).field("store", false).field("doc_values", false).endObject();
+    }
+
+    private KeywordFieldMapper.KeywordFieldType storedKeywordField(String name) {
+        FieldType ft = new FieldType(KeywordFieldMapper.Defaults.FIELD_TYPE);
+        ft.setDocValuesType(DocValuesType.NONE);
+        ft.setStored(true);
+        ft.freeze();
+        return new KeywordFieldMapper.KeywordFieldType(
+            name,
+            ft,
+            Lucene.KEYWORD_ANALYZER,
+            Lucene.KEYWORD_ANALYZER,
+            Lucene.KEYWORD_ANALYZER,
+            new KeywordFieldMapper.Builder(name, IndexVersion.current()).docValues(false),
+            true // TODO randomize - load from stored keyword fields if stored even in synthetic source
+        );
+    }
+
+    @AwaitsFix(bugUrl = "Get working for multiple indices")
+    public void testNullsShared() {
+        DriverContext driverContext = driverContext();
+        List<ValuesSourceReaderOperator.ShardContext> shardContexts = initShardContexts();
+        int[] pages = new int[] { 0 };
+        try (
+            Driver d = new Driver(
+                driverContext,
+                simpleInput(driverContext, 10),
+                List.of(
+                    new ValuesSourceReaderOperator.Factory(
+                        List.of(
+                            new ValuesSourceReaderOperator.FieldInfo("null1", ElementType.NULL, shardIdx -> BlockLoader.CONSTANT_NULLS),
+                            new ValuesSourceReaderOperator.FieldInfo("null2", ElementType.NULL, shardIdx -> BlockLoader.CONSTANT_NULLS)
+                        ),
+                        shardContexts,
+                        0
+                    ).get(driverContext)
+                ),
+                new PageConsumerOperator(page -> {
+                    try {
+                        assertThat(page.getBlockCount(), equalTo(3));
+                        assertThat(page.getBlock(1).areAllValuesNull(), equalTo(true));
+                        assertThat(page.getBlock(2).areAllValuesNull(), equalTo(true));
+                        assertThat(page.getBlock(1), sameInstance(page.getBlock(2)));
+                        pages[0]++;
+                    } finally {
+                        page.releaseBlocks();
+                    }
+                }),
+                () -> {}
+            )
+        ) {
+            runDriver(d);
+        }
+        assertThat(pages[0], greaterThan(0));
+        assertDriverContext(driverContext);
+    }
+
+    public void testDescriptionOfMany() throws IOException {
+        String indexKey = "index1";
+        initIndex(indexKey, 1, 1);
+        Block.MvOrdering ordering = randomFrom(Block.MvOrdering.values());
+        List<FieldCase> cases = infoAndChecksForEachType(ordering, ordering);
+
+        ValuesSourceReaderOperator.Factory factory = new ValuesSourceReaderOperator.Factory(
+            cases.stream().map(c -> c.info).toList(),
+            List.of(new ValuesSourceReaderOperator.ShardContext(reader(indexKey), () -> SourceLoader.FROM_STORED_SOURCE)),
+            0
+        );
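+        // with this many fields, the description reports just a count instead of naming each field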
fields]]")); + try (Operator op = factory.get(driverContext())) { + assertThat(op.toString(), equalTo("ValuesSourceReaderOperator[fields = [" + cases.size() + " fields]]")); + } + } + + public void testManyShards() throws IOException { + String indexKey = "index1"; + initMapping(indexKey); + int shardCount = between(2, 10); + int size = between(100, 1000); + Directory[] dirs = new Directory[shardCount]; + IndexReader[] readers = new IndexReader[shardCount]; + Closeable[] closeMe = new Closeable[shardCount * 2]; + Set seenShards = new TreeSet<>(); + Map keyCounts = new TreeMap<>(); + try { + for (int d = 0; d < dirs.length; d++) { + closeMe[d * 2 + 1] = dirs[d] = newDirectory(); + closeMe[d * 2] = readers[d] = initIndex(indexKey, dirs[d], size, between(10, size * 2)); + } + List contexts = new ArrayList<>(); + List readerShardContexts = new ArrayList<>(); + for (int s = 0; s < shardCount; s++) { + contexts.add(new LuceneSourceOperatorTests.MockShardContext(readers[s], s)); + readerShardContexts.add(new ValuesSourceReaderOperator.ShardContext(readers[s], () -> SourceLoader.FROM_STORED_SOURCE)); + } + var luceneFactory = new LuceneSourceOperator.Factory( + contexts, + ctx -> new MatchAllDocsQuery(), + DataPartitioning.SHARD, + randomIntBetween(1, 10), + 1000, + LuceneOperator.NO_LIMIT + ); + // TODO add index2 + MappedFieldType ft = mapperService(indexKey).fieldType("key"); + var readerFactory = new ValuesSourceReaderOperator.Factory( + List.of(new ValuesSourceReaderOperator.FieldInfo("key", ElementType.INT, shardIdx -> { + seenShards.add(shardIdx); + return ft.blockLoader(blContext()); + })), + readerShardContexts, + 0 + ); + DriverContext driverContext = driverContext(); + List results = drive( + readerFactory.get(driverContext), + CannedSourceOperator.collectPages(luceneFactory.get(driverContext)).iterator(), + driverContext + ); + assertThat(seenShards, equalTo(IntStream.range(0, shardCount).boxed().collect(Collectors.toCollection(TreeSet::new)))); + for (Page p : results) { + IntBlock keyBlock = p.getBlock(1); + IntVector keys = keyBlock.asVector(); + for (int i = 0; i < keys.getPositionCount(); i++) { + keyCounts.merge(keys.getInt(i), 1, Integer::sum); + } + } + assertThat(keyCounts.keySet(), hasSize(size)); + for (int k = 0; k < size; k++) { + assertThat(keyCounts.get(k), equalTo(shardCount)); + } + } finally { + IOUtils.close(closeMe); + } + } + + protected final List drive(Operator operator, Iterator input, DriverContext driverContext) { + return drive(List.of(operator), input, driverContext); + } + + protected final List drive(List operators, Iterator input, DriverContext driverContext) { + List results = new ArrayList<>(); + boolean success = false; + try ( + Driver d = new Driver( + driverContext, + new CannedSourceOperator(input), + operators, + new TestResultPageSinkOperator(results::add), + () -> {} + ) + ) { + runDriver(d); + success = true; + } finally { + if (success == false) { + Releasables.closeExpectNoException(Releasables.wrap(() -> Iterators.map(results.iterator(), p -> p::releaseBlocks))); + } + } + return results; + } + + public static void runDriver(Driver driver) { + runDriver(List.of(driver)); + } + + public static void runDriver(List drivers) { + drivers = new ArrayList<>(drivers); + int dummyDrivers = between(0, 10); + for (int i = 0; i < dummyDrivers; i++) { + drivers.add( + new Driver( + "dummy-session", + 0, + 0, + new DriverContext(BigArrays.NON_RECYCLING_INSTANCE, TestBlockFactory.getNonBreakingInstance()), + () -> "dummy-driver", + new 
+                    new SequenceLongBlockSourceOperator(
+                        TestBlockFactory.getNonBreakingInstance(),
+                        LongStream.range(0, between(1, 100)),
+                        between(1, 100)
+                    ),
+                    List.of(),
+                    new PageConsumerOperator(Page::releaseBlocks),
+                    Driver.DEFAULT_STATUS_INTERVAL,
+                    () -> {}
+                )
+            );
+        }
+        Randomness.shuffle(drivers);
+        int numThreads = between(1, 16);
+        ThreadPool threadPool = new TestThreadPool(
+            getTestClass().getSimpleName(),
+            new FixedExecutorBuilder(Settings.EMPTY, "esql", numThreads, 1024, "esql", EsExecutors.TaskTrackingConfig.DEFAULT)
+        );
+        var driverRunner = new DriverRunner(threadPool.getThreadContext()) {
+            @Override
+            protected void start(Driver driver, ActionListener<Void> driverListener) {
+                Driver.start(threadPool.getThreadContext(), threadPool.executor("esql"), driver, between(1, 10000), driverListener);
+            }
+        };
+        PlainActionFuture<Void> future = new PlainActionFuture<>();
+        try {
+            driverRunner.runToCompletion(drivers, future);
+            future.actionGet(TimeValue.timeValueSeconds(30));
+        } finally {
+            terminate(threadPool);
+        }
+    }
+
+    public static void assertDriverContext(DriverContext driverContext) {
+        assertTrue(driverContext.isFinished());
+        assertThat(driverContext.getSnapshot().releasables(), empty());
+    }
+
+    public static int randomPageSize() {
+        if (randomBoolean()) {
+            return between(1, 16);
+        } else {
+            return between(1, 16 * 1024);
+        }
+    }
+
+    /**
+     * This method produces the same converter for all shards, which makes it useful for general type-converting tests,
+     * but not for union-types tests, which require a different converter for each shard.
+     */
+    private static BlockLoader getBlockLoaderFor(int shardIdx, MappedFieldType ft, MappedFieldType ftX) {
+        if (shardIdx < 0 || shardIdx >= INDICES.size()) {
+            fail("unexpected shardIdx [" + shardIdx + "]");
+        }
+        BlockLoader blockLoader = ft.blockLoader(blContext());
+        if (ftX != null && ftX.typeName().equals(ft.typeName()) == false) {
+            blockLoader = new TestTypeConvertingBlockLoader(blockLoader, ft.typeName(), ftX.typeName());
+        } else {
+            TestIndexMappingConfig mappingConfig = INDICES.get("index" + (shardIdx + 1));
+            TestFieldType testFieldType = mappingConfig.fieldTypes.get(ft.name());
+            if (testFieldType != null) {
+                blockLoader = new TestTypeConvertingBlockLoader(blockLoader, testFieldType.dataType.typeName(), "keyword");
+            }
+        }
+        return blockLoader;
+    }
+
+    /**
+     * This method generates shard-specific field information, so we can have different types and BlockLoaders for each shard.
+     */
+    private BlockLoader getBlockLoaderFor(int shardIdx, String fieldName, DataType toType) {
+        if (shardIdx < 0 || shardIdx >= INDICES.size()) {
+            fail("unexpected shardIdx [" + shardIdx + "]");
+        }
+        String indexKey = "index" + (shardIdx + 1);
+        TestIndexMappingConfig mappingConfig = INDICES.get(indexKey);
+        TestFieldType testFieldType = mappingConfig.fieldTypes.get(fieldName);
+        if (testFieldType == null) {
+            throw new IllegalArgumentException("Unknown test field: " + fieldName);
+        }
+        MapperService mapper = mapperService(indexKey);
+        MappedFieldType ft = mapper.fieldType(fieldName);
+        BlockLoader blockLoader = ft.blockLoader(blContext());
+        blockLoader = new TestTypeConvertingBlockLoader(blockLoader, testFieldType.dataType.typeName(), toType.typeName());
+        return blockLoader;
+    }
+
+    /**
+     * The implementation of union-types relies on the BlockLoader.convert(Block) to convert the block to the correct type
+     * at the point it is read from source, so that the rest of the query only deals with a single type for that field.
+     * This is implemented in the 'esql' module, and so we have a mock for this behaviour here, which is a simplified subset of the
+     * features in the real implementation.
+     */
+    static class TestTypeConvertingBlockLoader implements BlockLoader {
+        protected final BlockLoader delegate;
+        private final EvalOperator.ExpressionEvaluator convertEvaluator;
+
+        protected TestTypeConvertingBlockLoader(BlockLoader delegate, String fromTypeName, String toTypeName) {
+            this.delegate = delegate;
+            DriverContext driverContext = new DriverContext(
+                BigArrays.NON_RECYCLING_INSTANCE,
+                new org.elasticsearch.compute.data.BlockFactory(
+                    new NoopCircuitBreaker(CircuitBreaker.REQUEST),
+                    BigArrays.NON_RECYCLING_INSTANCE
+                )
+            );
+            TestBlockConverter blockConverter = TestDataTypeConverters.blockConverter(driverContext, fromTypeName, toTypeName);
+            this.convertEvaluator = new EvalOperator.ExpressionEvaluator() {
+                @Override
+                public org.elasticsearch.compute.data.Block eval(Page page) {
+                    org.elasticsearch.compute.data.Block block = page.getBlock(0);
+                    return blockConverter.convert(block);
+                }
+
+                @Override
+                public void close() {}
+            };
+        }
+
+        @Override
+        public Builder builder(BlockFactory factory, int expectedCount) {
+            // Return the delegate's builder, which can build the original mapped type, before conversion
+            return delegate.builder(factory, expectedCount);
+        }
+
+        @Override
+        public Block convert(Block block) {
+            Page page = new Page((org.elasticsearch.compute.data.Block) block);
+            return convertEvaluator.eval(page);
+        }
+
+        @Override
+        public ColumnAtATimeReader columnAtATimeReader(LeafReaderContext context) throws IOException {
+            ColumnAtATimeReader reader = delegate.columnAtATimeReader(context);
+            if (reader == null) {
+                return null;
+            }
+            return new ColumnAtATimeReader() {
+                @Override
+                public Block read(BlockFactory factory, Docs docs) throws IOException {
+                    Block block = reader.read(factory, docs);
+                    Page page = new Page((org.elasticsearch.compute.data.Block) block);
+                    return convertEvaluator.eval(page);
+                }
+
+                @Override
+                public boolean canReuse(int startingDocID) {
+                    return reader.canReuse(startingDocID);
+                }
+
+                @Override
+                public String toString() {
+                    return reader.toString();
+                }
+            };
+        }
+
+        @Override
+        public RowStrideReader rowStrideReader(LeafReaderContext context) throws IOException {
+            // We do no type conversion here, since that is done in the ValuesSourceReaderOperator for row-stride cases,
+            // using the BlockLoader.convert(Block) function defined above
+            return delegate.rowStrideReader(context);
+        }
+
+        @Override
+        public StoredFieldsSpec rowStrideStoredFieldSpec() {
+            return delegate.rowStrideStoredFieldSpec();
+        }
+
+        @Override
+        public boolean supportsOrdinals() {
+            return delegate.supportsOrdinals();
+        }
+
+        @Override
+        public SortedSetDocValues ordinals(LeafReaderContext context) throws IOException {
+            return delegate.ordinals(context);
+        }
+
+        @Override
+        public final String toString() {
+            return "TypeConvertingBlockLoader[delegate=" + delegate + "]";
+        }
+    }
+
+    @FunctionalInterface
+    private interface TestBlockConverter {
+        Block convert(Block block);
+    }
+
+    /**
+     * Blocks that should be converted from some type to a string (keyword) can use this converter.
+     */
+    private abstract static class BlockToStringConverter implements TestBlockConverter {
+        private final DriverContext driverContext;
+
+        BlockToStringConverter(DriverContext driverContext) {
+            this.driverContext = driverContext;
+        }
+
+        @Override
+        public Block convert(Block block) {
+            int positionCount = block.getPositionCount();
+            try (BytesRefBlock.Builder builder = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) {
+                for (int p = 0; p < positionCount; p++) {
+                    int valueCount = block.getValueCount(p);
+                    int start = block.getFirstValueIndex(p);
+                    int end = start + valueCount;
+                    boolean positionOpened = false;
+                    boolean valuesAppended = false;
+                    for (int i = start; i < end; i++) {
+                        BytesRef value = evalValue(block, i);
+                        if (positionOpened == false && valueCount > 1) {
+                            builder.beginPositionEntry();
+                            positionOpened = true;
+                        }
+                        builder.appendBytesRef(value);
+                        valuesAppended = true;
+                    }
+                    if (valuesAppended == false) {
+                        builder.appendNull();
+                    } else if (positionOpened) {
+                        builder.endPositionEntry();
+                    }
+                }
+                return builder.build();
+            } finally {
+                block.close();
+            }
+        }
+
+        abstract BytesRef evalValue(Block container, int index);
+    }
+
+    /**
+     * Blocks that should be converted from a string (keyword) to some other type can use this converter.
+     */
+    private abstract static class TestBlockFromStringConverter<T> implements TestBlockConverter {
+        protected final DriverContext driverContext;
+
+        TestBlockFromStringConverter(DriverContext driverContext) {
+            this.driverContext = driverContext;
+        }
+
+        @Override
+        public Block convert(Block b) {
+            BytesRefBlock block = (BytesRefBlock) b;
+            int positionCount = block.getPositionCount();
+            try (Block.Builder builder = blockBuilder(positionCount)) {
+                BytesRef scratchPad = new BytesRef();
+                for (int p = 0; p < positionCount; p++) {
+                    int valueCount = block.getValueCount(p);
+                    int start = block.getFirstValueIndex(p);
+                    int end = start + valueCount;
+                    boolean positionOpened = false;
+                    boolean valuesAppended = false;
+                    for (int i = start; i < end; i++) {
+                        T value = evalValue(block, i, scratchPad);
+                        if (positionOpened == false && valueCount > 1) {
+                            builder.beginPositionEntry();
+                            positionOpened = true;
+                        }
+                        appendValue(builder, value);
+                        valuesAppended = true;
+                    }
+                    if (valuesAppended == false) {
+                        builder.appendNull();
+                    } else if (positionOpened) {
+                        builder.endPositionEntry();
+                    }
+                }
+                return builder.build();
+            } finally {
+                b.close();
+            }
+        }
+
+        abstract Block.Builder blockBuilder(int expectedCount);
+
+        abstract void appendValue(Block.Builder builder, T value);
+
+        abstract T evalValue(BytesRefBlock container, int index, BytesRef scratchPad);
+    }
+
+    private static class TestLongBlockToStringConverter extends BlockToStringConverter {
+        TestLongBlockToStringConverter(DriverContext driverContext) {
+            super(driverContext);
+        }
+
+        @Override
+        BytesRef evalValue(Block container, int index) {
+            return new BytesRef(Long.toString(((LongBlock) container).getLong(index)));
+        }
+    }
+
+    private static class TestLongBlockFromStringConverter extends TestBlockFromStringConverter<Long> {
+        TestLongBlockFromStringConverter(DriverContext driverContext) {
+            super(driverContext);
+        }
+
+        @Override
+        Block.Builder blockBuilder(int expectedCount) {
+            return driverContext.blockFactory().newLongBlockBuilder(expectedCount);
+        }
+
+        @Override
+        Long evalValue(BytesRefBlock container, int index, BytesRef scratchPad) {
+            return StringUtils.parseLong(container.getBytesRef(index, scratchPad).utf8ToString());
+        }
+
+        @Override
+        void appendValue(Block.Builder builder, Long value) {
+            ((LongBlock.Builder) builder).appendLong(value);
+        }
+    }
+
+    private static class TestIntegerBlockToStringConverter extends BlockToStringConverter {
+        TestIntegerBlockToStringConverter(DriverContext driverContext) {
+            super(driverContext);
+        }
+
+        @Override
+        BytesRef evalValue(Block container, int index) {
+            return new BytesRef(Integer.toString(((IntBlock) container).getInt(index)));
+        }
+    }
+
+    private static class TestIntegerBlockFromStringConverter extends TestBlockFromStringConverter<Integer> {
+        TestIntegerBlockFromStringConverter(DriverContext driverContext) {
+            super(driverContext);
+        }
+
+        @Override
+        Block.Builder blockBuilder(int expectedCount) {
+            return driverContext.blockFactory().newIntBlockBuilder(expectedCount);
+        }
+
+        @Override
+        Integer evalValue(BytesRefBlock container, int index, BytesRef scratchPad) {
+            return (int) StringUtils.parseLong(container.getBytesRef(index, scratchPad).utf8ToString());
+        }
+
+        @Override
+        void appendValue(Block.Builder builder, Integer value) {
+            ((IntBlock.Builder) builder).appendInt(value);
+        }
+    }
+
+    private static class TestBooleanBlockToStringConverter extends BlockToStringConverter {
+
+        TestBooleanBlockToStringConverter(DriverContext driverContext) {
+            super(driverContext);
+        }
+
+        @Override
+        BytesRef evalValue(Block container, int index) {
+            return ((BooleanBlock) container).getBoolean(index) ? new BytesRef("true") : new BytesRef("false");
+        }
+    }
+
+    private static class TestBooleanBlockFromStringConverter extends TestBlockFromStringConverter<Boolean> {
+
+        TestBooleanBlockFromStringConverter(DriverContext driverContext) {
+            super(driverContext);
+        }
+
+        @Override
+        Block.Builder blockBuilder(int expectedCount) {
+            return driverContext.blockFactory().newBooleanBlockBuilder(expectedCount);
+        }
+
+        @Override
+        void appendValue(Block.Builder builder, Boolean value) {
+            ((BooleanBlock.Builder) builder).appendBoolean(value);
+        }
+
+        @Override
+        Boolean evalValue(BytesRefBlock container, int index, BytesRef scratchPad) {
+            return Boolean.parseBoolean(container.getBytesRef(index, scratchPad).utf8ToString());
+        }
+    }
+
+    private static class TestDoubleBlockToStringConverter extends BlockToStringConverter {
+
+        TestDoubleBlockToStringConverter(DriverContext driverContext) {
+            super(driverContext);
+        }
+
+        @Override
+        BytesRef evalValue(Block container, int index) {
+            return new BytesRef(Double.toString(((DoubleBlock) container).getDouble(index)));
+        }
+    }
+
+    private static class TestDoubleBlockFromStringConverter extends TestBlockFromStringConverter<Double> {
+
+        TestDoubleBlockFromStringConverter(DriverContext driverContext) {
+            super(driverContext);
+        }
+
+        @Override
+        Block.Builder blockBuilder(int expectedCount) {
+            return driverContext.blockFactory().newDoubleBlockBuilder(expectedCount);
+        }
+
+        @Override
+        void appendValue(Block.Builder builder, Double value) {
+            ((DoubleBlock.Builder) builder).appendDouble(value);
+        }
+
+        @Override
+        Double evalValue(BytesRefBlock container, int index, BytesRef scratchPad) {
+            return Double.parseDouble(container.getBytesRef(index, scratchPad).utf8ToString());
+        }
+    }
+
+    /**
+     * Many types are backed by a BytesRef block, but encode their contents in different ways.
+     * For example, the IP type has a 16-byte block that encodes both IPv4 and IPv6 addresses as 16-byte IPv6 binary arrays.
+     * But the KEYWORD type has a BytesRef block that encodes the keyword as a UTF-8 string,
+     * and it typically has a much shorter length for IP data; for example, "192.168.0.1" is 11 bytes.
+     * Converting blocks between these types involves converting the BytesRef block to the specific internal type,
+     * and then back to a BytesRef block with the other encoding.
+     */
+    private abstract static class TestBytesRefToBytesRefConverter extends BlockToStringConverter {
+
+        BytesRef scratchPad = new BytesRef();
+
+        TestBytesRefToBytesRefConverter(DriverContext driverContext) {
+            super(driverContext);
+        }
+
+        @Override
+        BytesRef evalValue(Block container, int index) {
+            return convertByteRef(((BytesRefBlock) container).getBytesRef(index, scratchPad));
+        }
+
+        abstract BytesRef convertByteRef(BytesRef bytesRef);
+    }
+
+    private static class TestIPToStringConverter extends TestBytesRefToBytesRefConverter {
+
+        TestIPToStringConverter(DriverContext driverContext) {
+            super(driverContext);
+        }
+
+        @Override
+        BytesRef convertByteRef(BytesRef bytesRef) {
+            return new BytesRef(DocValueFormat.IP.format(bytesRef));
+        }
+    }
+
+    private static class TestStringToIPConverter extends TestBytesRefToBytesRefConverter {
+
+        TestStringToIPConverter(DriverContext driverContext) {
+            super(driverContext);
+        }
+
+        @Override
+        BytesRef convertByteRef(BytesRef bytesRef) {
+            return StringUtils.parseIP(bytesRef.utf8ToString());
+        }
+    }
+
+    /**
+     * Utility class for creating type-specific converters based on their typeName values.
+     * We do not support all possible combinations, but only those that are needed for the tests.
+     * In particular, either the 'from' or 'to' type must be KEYWORD.
+     */
+    private static class TestDataTypeConverters {
+        public static TestBlockConverter blockConverter(DriverContext driverContext, String fromTypeName, String toTypeName) {
+            if (toTypeName == null || fromTypeName.equals(toTypeName)) {
+                return b -> b;
+            }
+            if (isString(fromTypeName)) {
+                return switch (toTypeName) {
+                    case "boolean" -> new TestBooleanBlockFromStringConverter(driverContext);
+                    case "short", "integer" -> new TestIntegerBlockFromStringConverter(driverContext);
+                    case "long" -> new TestLongBlockFromStringConverter(driverContext);
+                    case "double", "float" -> new TestDoubleBlockFromStringConverter(driverContext);
+                    case "ip" -> new TestStringToIPConverter(driverContext);
+                    default -> throw new UnsupportedOperationException("Conversion from string to " + toTypeName + " is not supported");
+                };
+            }
+            if (isString(toTypeName)) {
+                return switch (fromTypeName) {
+                    case "boolean" -> new TestBooleanBlockToStringConverter(driverContext);
+                    case "short", "integer" -> new TestIntegerBlockToStringConverter(driverContext);
+                    case "long" -> new TestLongBlockToStringConverter(driverContext);
+                    case "double", "float" -> new TestDoubleBlockToStringConverter(driverContext);
+                    case "ip" -> new TestIPToStringConverter(driverContext);
+                    default -> throw new UnsupportedOperationException("Conversion from " + fromTypeName + " to string is not supported");
+                };
+            }
+            throw new UnsupportedOperationException("Conversion from " + fromTypeName + " to " + toTypeName + " is not supported");
+        }
+
+        private static boolean isString(String typeName) {
+            return typeName.equals("keyword") || typeName.equals("text");
+        }
+    }
+}
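The two BytesRef encodings that the converters above translate between can be illustrated with the JDK alone. The following is a minimal, self-contained sketch; the class name IpEncodingSketch is invented for illustration, and in the test itself the equivalent work is done by StringUtils.parseIP and DocValueFormat.IP.format:

    import java.net.InetAddress;
    import java.util.Arrays;

    class IpEncodingSketch {
        public static void main(String[] args) throws Exception {
            // The keyword form "192.168.0.1" is an 11-byte UTF-8 string.
            String keyword = "192.168.0.1";
            // The ip form is a fixed 16-byte IPv4-mapped IPv6 address (::ffff:192.168.0.1).
            byte[] v4 = InetAddress.getByName(keyword).getAddress(); // 4 bytes for an IPv4 literal, no DNS lookup
            byte[] v6 = new byte[16];
            v6[10] = (byte) 0xff;
            v6[11] = (byte) 0xff;
            System.arraycopy(v4, 0, v6, 12, 4);
            System.out.println(keyword.getBytes("UTF-8").length);      // 11
            System.out.println(v6.length + " " + Arrays.toString(v6)); // 16 [..., -1, -1, -64, -88, 0, 1]
        }
    }

The fixed 16-byte form keeps comparisons and sorting uniform across IPv4 and IPv6 values, which is why the string form is typically the shorter of the two.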
diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvAssert.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvAssert.java
index af3af033efd4c..875058ba6e0e4 100644
--- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvAssert.java
+++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvAssert.java
@@ -41,7 +41,6 @@
 import static org.hamcrest.Matchers.instanceOf;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 public final class CsvAssert {
@@ -110,6 +109,9 @@ private static void assertMetadata(
         if (actualType == Type.INTEGER && expectedType == Type.LONG) {
             actualType = Type.LONG;
         }
+        if (actualType == null) {
+            actualType = Type.NULL;
+        }
 
         assertEquals(
             "Different column type for column [" + expectedName + "] (" + expectedType + " != " + actualType + ")",
@@ -188,7 +190,13 @@ public static void assertData(
         for (int row = 0; row < expectedValues.size(); row++) {
             try {
-                assertTrue("Expected more data but no more entries found after [" + row + "]", row < actualValues.size());
+                if (row >= actualValues.size()) {
+                    if (dataFailures.isEmpty()) {
+                        fail("Expected more data but no more entries found after [" + row + "]");
+                    } else {
+                        dataFailure(dataFailures, "Expected more data but no more entries found after [" + row + "]\n");
+                    }
+                }
 
                 if (logger != null) {
                     logger.info(row(actualValues, row));
@@ -257,7 +265,11 @@
         }
     }
 
-    private static void dataFailure(List<DataFailure> dataFailures) {
-        fail("Data mismatch:\n" + dataFailures.stream().map(f -> {
+    private static void dataFailure(List<DataFailure> dataFailures) {
+        dataFailure(dataFailures, "");
+    }
+
+    private static void dataFailure(List<DataFailure> dataFailures, String prefixError) {
+        fail(prefixError + "Data mismatch:\n" + dataFailures.stream().map(f -> {
         Description description = new StringDescription();
         ListMatcher expected;
         if (f.expected instanceof List<?> e) {
diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java
index 1c1ec3194fef5..ec5770e8ce70b 100644
--- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java
+++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java
@@ -57,6 +57,16 @@ public class CsvTestsDataLoader {
     private static final TestsDataset LANGUAGES = new TestsDataset("languages", "mapping-languages.json", "languages.csv");
     private static final TestsDataset UL_LOGS = new TestsDataset("ul_logs", "mapping-ul_logs.json", "ul_logs.csv");
     private static final TestsDataset SAMPLE_DATA = new TestsDataset("sample_data", "mapping-sample_data.json", "sample_data.csv");
+    private static final TestsDataset SAMPLE_DATA_STR = new TestsDataset(
+        "sample_data_str",
+        "mapping-sample_data_str.json",
+        "sample_data_str.csv"
+    );
+    private static final TestsDataset SAMPLE_DATA_TS_LONG = new TestsDataset(
+        "sample_data_ts_long",
+        "mapping-sample_data_ts_long.json",
+        "sample_data_ts_long.csv"
+    );
     private static final TestsDataset CLIENT_IPS = new TestsDataset("clientips", "mapping-clientips.json", "clientips.csv");
     private static final TestsDataset CLIENT_CIDR = new TestsDataset("client_cidr", "mapping-client_cidr.json", "client_cidr.csv");
     private static final TestsDataset AGES = new TestsDataset("ages", "mapping-ages.json", "ages.csv");
@@ -95,6 +105,8 @@ public class CsvTestsDataLoader {
         Map.entry(LANGUAGES.indexName, LANGUAGES),
         Map.entry(UL_LOGS.indexName, UL_LOGS),
         Map.entry(SAMPLE_DATA.indexName, SAMPLE_DATA),
+        Map.entry(SAMPLE_DATA_STR.indexName, SAMPLE_DATA_STR),
+        Map.entry(SAMPLE_DATA_TS_LONG.indexName, SAMPLE_DATA_TS_LONG),
         Map.entry(CLIENT_IPS.indexName, CLIENT_IPS),
         Map.entry(CLIENT_CIDR.indexName, CLIENT_CIDR),
         Map.entry(AGES.indexName, AGES),
diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-sample_data_str.json b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-sample_data_str.json
new file mode 100644
index 0000000000000..9e97de8c92928
--- /dev/null
+++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-sample_data_str.json
@@ -0,0 +1,16 @@
+{
+  "properties": {
+    "@timestamp": {
+      "type": "date"
+    },
+    "client_ip": {
+      "type": "keyword"
+    },
+    "event_duration": {
+      "type": "long"
+    },
+    "message": {
+      "type": "keyword"
+    }
+  }
+}
diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-sample_data_ts_long.json b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-sample_data_ts_long.json
new file mode 100644
index 0000000000000..ecf21a2a919d0
--- /dev/null
+++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-sample_data_ts_long.json
@@ -0,0 +1,16 @@
+{
+  "properties": {
+    "@timestamp": {
+      "type": "long"
+    },
+    "client_ip": {
+      "type": "ip"
+    },
+    "event_duration": {
+      "type": "long"
+    },
+    "message": {
+      "type": "keyword"
+    }
+  }
+}
diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/sample_data_str.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/sample_data_str.csv
new file mode 100644
index 0000000000000..bc98671adc7ff
--- /dev/null
+++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/sample_data_str.csv
@@ -0,0 +1,8 @@
+@timestamp:date,client_ip:keyword,event_duration:long,message:keyword
+2023-10-23T13:55:01.543Z,172.21.3.15,1756467,Connected to 10.1.0.1
+2023-10-23T13:53:55.832Z,172.21.3.15,5033755,Connection error
+2023-10-23T13:52:55.015Z,172.21.3.15,8268153,Connection error
+2023-10-23T13:51:54.732Z,172.21.3.15,725448,Connection error
+2023-10-23T13:33:34.937Z,172.21.0.5,1232382,Disconnected
+2023-10-23T12:27:28.948Z,172.21.2.113,2764889,Connected to 10.1.0.2
+2023-10-23T12:15:03.360Z,172.21.2.162,3450233,Connected to 10.1.0.3
diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/sample_data_ts_long.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/sample_data_ts_long.csv
new file mode 100644
index 0000000000000..2a6add2ea624d
--- /dev/null
+++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/sample_data_ts_long.csv
@@ -0,0 +1,8 @@
+@timestamp:long,client_ip:ip,event_duration:long,message:keyword
+1698069301543,172.21.3.15,1756467,Connected to 10.1.0.1
+1698069235832,172.21.3.15,5033755,Connection error
+1698069175015,172.21.3.15,8268153,Connection error
+1698069114732,172.21.3.15,725448,Connection error
+1698068014937,172.21.0.5,1232382,Disconnected
+1698064048948,172.21.2.113,2764889,Connected to 10.1.0.2
+1698063303360,172.21.2.162,3450233,Connected to 10.1.0.3
diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/union_types.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/union_types.csv-spec
new file mode 100644
index 0000000000000..ee8c4be385e0f
--- /dev/null
+++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/union_types.csv-spec
@@ -0,0 +1,719 @@
+singleIndexIp
+FROM sample_data
+| EVAL client_ip = TO_IP(client_ip)
+| KEEP @timestamp, client_ip, event_duration, message
+| SORT @timestamp DESC
+;
+
+@timestamp:date | client_ip:ip | event_duration:long | message:keyword
+2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1
+2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error
+2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error
+2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error
+2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected
+2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2
+2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3
+;
+
+singleIndexWhereIpLike
+FROM sample_data
+| WHERE TO_STRING(client_ip) LIKE "172.21.2.*"
+| KEEP @timestamp, event_duration, message
+| SORT @timestamp DESC
+;
+
+@timestamp:date | event_duration:long | message:keyword
+2023-10-23T12:27:28.948Z | 2764889 | Connected to 10.1.0.2
+2023-10-23T12:15:03.360Z | 3450233 | Connected to 10.1.0.3
+;
+
+singleIndexTsLong
+FROM sample_data_ts_long
+| EVAL @timestamp = TO_DATETIME(@timestamp)
+| KEEP @timestamp, client_ip, event_duration, message
+| SORT @timestamp DESC
+;
+
+@timestamp:date | client_ip:ip | event_duration:long | message:keyword
+2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1
+2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error
+2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error
+2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error
+2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected
+2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2
+2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3
+;
+
+singleIndexIpStats
+FROM sample_data
+| EVAL client_ip = TO_IP(client_ip)
+| STATS count=count(*) BY client_ip
+| SORT count DESC, client_ip ASC
+| KEEP count, client_ip
+;
+
+count:long | client_ip:ip
+4 | 172.21.3.15
+1 | 172.21.0.5
+1 | 172.21.2.113
+1 | 172.21.2.162
+;
+
+singleIndexIpStringStats
+FROM sample_data_str
+| EVAL client_ip = TO_IP(client_ip)
+| STATS count=count(*) BY client_ip
+| SORT count DESC, client_ip ASC
+| KEEP count, client_ip
+;
+
+count:long | client_ip:ip
+4 | 172.21.3.15
+1 | 172.21.0.5
+1 | 172.21.2.113
+1 | 172.21.2.162
+;
+
+multiIndexIpString
+required_capability: union_types
+required_capability: metadata_fields
+
+FROM sample_data, sample_data_str METADATA _index
+| EVAL client_ip = TO_IP(client_ip)
+| KEEP _index, @timestamp, client_ip, event_duration, message
+| SORT _index ASC, @timestamp DESC
+;
+
+_index:keyword | @timestamp:date | client_ip:ip | event_duration:long | message:keyword
+sample_data | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1
+sample_data | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error
+sample_data | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error
+sample_data | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error
+sample_data | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected
+sample_data | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2
+sample_data | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3
+sample_data_str | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1
+sample_data_str | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error
+sample_data_str | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error
+sample_data_str | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error
+sample_data_str | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected
+sample_data_str | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2
+sample_data_str | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3
+;
+
+multiIndexIpStringRename
+required_capability: union_types
+required_capability: metadata_fields
+
+FROM sample_data, sample_data_str METADATA _index
+| EVAL host_ip = TO_IP(client_ip)
+| KEEP _index, @timestamp, host_ip, event_duration, message
+| SORT _index ASC, @timestamp DESC
+;
+
+_index:keyword | @timestamp:date | host_ip:ip | event_duration:long | message:keyword
+sample_data | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1
+sample_data | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error
+sample_data | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error
+sample_data | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error
+sample_data | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected
+sample_data | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2
+sample_data | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3
+sample_data_str | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1
+sample_data_str | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error
+sample_data_str | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error
+sample_data_str | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error
+sample_data_str | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected
+sample_data_str | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2
+sample_data_str | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3
+;
+
+multiIndexIpStringRenameToString
+required_capability: union_types
+required_capability: metadata_fields
+
+FROM sample_data, sample_data_str METADATA _index
+| EVAL host_ip = TO_STRING(TO_IP(client_ip))
+| KEEP _index, @timestamp, host_ip, event_duration, message
+| SORT _index ASC, @timestamp DESC
+;
+
+_index:keyword | @timestamp:date | host_ip:keyword | event_duration:long | message:keyword
+sample_data | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1
+sample_data | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error
+sample_data | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error
+sample_data | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error
+sample_data | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected
+sample_data | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2
+sample_data | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3
+sample_data_str | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1
+sample_data_str | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error
+sample_data_str | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error
+sample_data_str | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error
+sample_data_str | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected
+sample_data_str | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2
+sample_data_str | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3
+;
+
+multiIndexWhereIpString
+required_capability: union_types
+required_capability: metadata_fields
+
+FROM sample_data, sample_data_str METADATA _index
+| WHERE STARTS_WITH(TO_STRING(client_ip), "172.21.2")
+| KEEP _index, @timestamp, event_duration, message
+| SORT _index ASC, @timestamp DESC
+;
+
+_index:keyword | @timestamp:date | event_duration:long | message:keyword
+sample_data | 2023-10-23T12:27:28.948Z | 2764889 | Connected to 10.1.0.2
+sample_data | 2023-10-23T12:15:03.360Z | 3450233 | Connected to 10.1.0.3
+sample_data_str | 2023-10-23T12:27:28.948Z | 2764889 | Connected to 10.1.0.2
+sample_data_str | 2023-10-23T12:15:03.360Z | 3450233 | Connected to 10.1.0.3
+;
+
+multiIndexWhereIpStringLike
+required_capability: union_types
+required_capability: metadata_fields
+
+FROM sample_data, sample_data_str METADATA _index
+| WHERE TO_STRING(client_ip) LIKE "172.21.2.*"
+| KEEP _index, @timestamp, event_duration, message
+| SORT _index ASC, @timestamp DESC
+;
+
+_index:keyword | @timestamp:date | event_duration:long | message:keyword
+sample_data | 2023-10-23T12:27:28.948Z | 2764889 | Connected to 10.1.0.2
+sample_data | 2023-10-23T12:15:03.360Z | 3450233 | Connected to 10.1.0.3
+sample_data_str | 2023-10-23T12:27:28.948Z | 2764889 | Connected to 10.1.0.2
+sample_data_str | 2023-10-23T12:15:03.360Z | 3450233 | Connected to 10.1.0.3
+;
+
+multiIndexIpStringStats
+required_capability: union_types
+
+FROM sample_data, sample_data_str
+| EVAL client_ip = TO_IP(client_ip)
+| STATS count=count(*) BY client_ip
+| SORT count DESC, client_ip ASC
+| KEEP count, client_ip
+;
+
+count:long | client_ip:ip
+8 | 172.21.3.15
+2 | 172.21.0.5
+2 | 172.21.2.113
+2 | 172.21.2.162
+;
+
+multiIndexIpStringRenameStats
+required_capability: union_types
+
+FROM sample_data, sample_data_str
+| EVAL host_ip = TO_IP(client_ip)
+| STATS count=count(*) BY host_ip
+| SORT count DESC, host_ip ASC
+| KEEP count, host_ip
+;
+
+count:long | host_ip:ip
+8 | 172.21.3.15
+2 | 172.21.0.5
+2 | 172.21.2.113
+2 | 172.21.2.162
+;
+
+multiIndexIpStringRenameToStringStats
+required_capability: union_types
+
+FROM sample_data, sample_data_str
+| EVAL host_ip = TO_STRING(TO_IP(client_ip))
+| STATS count=count(*) BY host_ip
+| SORT count DESC, host_ip ASC
+| KEEP count, host_ip
+;
+
+count:long | host_ip:keyword
+8 | 172.21.3.15
+2 | 172.21.0.5
+2 | 172.21.2.113
+2 | 172.21.2.162
+;
+
+multiIndexIpStringStatsInline
+required_capability: union_types
+required_capability: union_types_inline_fix
+
+FROM sample_data, sample_data_str
+| STATS count=count(*) BY client_ip = TO_IP(client_ip)
+| SORT count DESC, client_ip ASC
+| KEEP count, client_ip
+;
+
+count:long | client_ip:ip
+8 | 172.21.3.15
+2 | 172.21.0.5
+2 | 172.21.2.113
+2 | 172.21.2.162
+;
+
+multiIndexWhereIpStringStats
+required_capability: union_types
+
+FROM sample_data, sample_data_str
+| WHERE STARTS_WITH(TO_STRING(client_ip), "172.21.2")
+| STATS count=count(*) BY message
+| SORT count DESC, message ASC
+| KEEP count, message
+;
+
+count:long | message:keyword
+2 | Connected to 10.1.0.2
+2 | Connected to 10.1.0.3
+;
+
+multiIndexTsLong
+required_capability: union_types
+required_capability: metadata_fields
+
+FROM sample_data, sample_data_ts_long METADATA _index
+| EVAL @timestamp = TO_DATETIME(@timestamp)
+| KEEP _index, @timestamp, client_ip, event_duration, message
+| SORT _index ASC, @timestamp DESC
+;
+
+_index:keyword | @timestamp:date | client_ip:ip | event_duration:long | message:keyword
+sample_data | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1
+sample_data | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error
+sample_data | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error
+sample_data | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error
+sample_data | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected
+sample_data | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2
+sample_data | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3
+sample_data_ts_long | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1
+sample_data_ts_long | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error
+sample_data_ts_long | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error
+sample_data_ts_long | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error
+sample_data_ts_long | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected
+sample_data_ts_long | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2
+sample_data_ts_long | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3
+;
+
+multiIndexTsLongRename
+required_capability: union_types
+required_capability: metadata_fields
+
+FROM sample_data, sample_data_ts_long METADATA _index
+| EVAL ts = TO_DATETIME(@timestamp)
+| KEEP _index, ts, client_ip, event_duration, message
+| SORT _index ASC, ts DESC
+;
+
+_index:keyword | ts:date | client_ip:ip | event_duration:long | message:keyword
+sample_data | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1
+sample_data | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error
+sample_data | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error
+sample_data | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error
+sample_data | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected
+sample_data | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2
+sample_data | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3
+sample_data_ts_long | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1
+sample_data_ts_long | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error
+sample_data_ts_long | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error
+sample_data_ts_long | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error
+sample_data_ts_long | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected
+sample_data_ts_long | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2
+sample_data_ts_long | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3
+;
+
+multiIndexTsLongRenameToString
+required_capability: union_types
+required_capability: metadata_fields
+
+FROM sample_data, sample_data_ts_long METADATA _index
+| EVAL ts = TO_STRING(TO_DATETIME(@timestamp))
+| KEEP _index, ts, client_ip, event_duration, message
+| SORT _index ASC, ts DESC
+;
+
+_index:keyword | ts:keyword | client_ip:ip | event_duration:long | message:keyword
+sample_data | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1
+sample_data | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error
+sample_data | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error
+sample_data | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error
+sample_data | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected
+sample_data | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2
+sample_data | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3
+sample_data_ts_long | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1
+sample_data_ts_long | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error
+sample_data_ts_long | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error
+sample_data_ts_long | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error
+sample_data_ts_long | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected
+sample_data_ts_long | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2
+sample_data_ts_long | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3
+;
+
+multiIndexWhereTsLong
+required_capability: union_types
+required_capability: metadata_fields
+
+FROM sample_data, sample_data_ts_long METADATA _index
+| WHERE TO_LONG(@timestamp) < 1698068014937
+| KEEP _index, client_ip, event_duration, message
+| SORT _index ASC, client_ip ASC
+;
+
+_index:keyword | client_ip:ip | event_duration:long | message:keyword
+sample_data | 172.21.2.113 | 2764889 | Connected to 10.1.0.2
+sample_data | 172.21.2.162 | 3450233 | Connected to 10.1.0.3
+sample_data_ts_long | 172.21.2.113 | 2764889 | Connected to 10.1.0.2
+sample_data_ts_long | 172.21.2.162 | 3450233 | Connected to 10.1.0.3
+;
+
+multiIndexTsLongStats
+required_capability: union_types
+
+FROM sample_data, sample_data_ts_long
+| EVAL @timestamp = DATE_TRUNC(1 hour, TO_DATETIME(@timestamp))
+| STATS count=count(*) BY @timestamp
+| SORT count DESC, @timestamp ASC
+| KEEP count, @timestamp
+;
+
+count:long | @timestamp:date
+10 | 2023-10-23T13:00:00.000Z
+4 | 2023-10-23T12:00:00.000Z
+;
+
+multiIndexTsLongRenameStats
+required_capability: union_types
+
+FROM sample_data, sample_data_ts_long
+| EVAL hour = DATE_TRUNC(1 hour, TO_DATETIME(@timestamp))
+| STATS count=count(*) BY hour
+| SORT count DESC, hour ASC
+| KEEP count, hour
+;
+
+count:long | hour:date
+10 | 2023-10-23T13:00:00.000Z
+4 | 2023-10-23T12:00:00.000Z
+;
+
+multiIndexTsLongRenameToDatetimeToStringStats
+required_capability: union_types
+
+FROM sample_data, sample_data_ts_long
+| EVAL hour = LEFT(TO_STRING(TO_DATETIME(@timestamp)), 13)
+| STATS count=count(*) BY hour
+| SORT count DESC, hour ASC
+| KEEP count, hour
+;
+
+count:long | hour:keyword
+10 | 2023-10-23T13
+4 | 2023-10-23T12
+;
+
+multiIndexTsLongRenameToStringStats
+required_capability: union_types
+
+FROM sample_data, sample_data_ts_long
+| EVAL mess = LEFT(TO_STRING(@timestamp), 7)
+| STATS count=count(*) BY mess
+| SORT count DESC, mess DESC
+| KEEP count, mess
+;
+
+count:long | mess:keyword
+7 | 2023-10
+4 | 1698069
+1 | 1698068
+1 | 1698064
+1 | 1698063
+;
+
+multiIndexTsLongStatsInline
+required_capability: union_types
+
+FROM sample_data, sample_data_ts_long
+| STATS count=COUNT(*), max=MAX(TO_DATETIME(@timestamp))
+| KEEP count, max
+;
+
+count:long | max:date
+14 | 2023-10-23T13:55:01.543Z
+;
+
+multiIndexTsLongStatsInlineDropped
+required_capability: union_types
+
+FROM sample_data, sample_data_ts_long
+| STATS count=COUNT(*), max=MAX(TO_DATETIME(@timestamp))
+| KEEP count
+;
+
+count:long
+14
+;
+
+multiIndexWhereTsLongStats
+required_capability: union_types
+
+FROM sample_data, sample_data_ts_long
+| WHERE TO_LONG(@timestamp) < 1698068014937
+| STATS count=count(*) BY message
+| SORT count DESC, message ASC
+| KEEP count, message
+;
+
+count:long | message:keyword
+2 | Connected to 10.1.0.2
+2 | Connected to 10.1.0.3
+;
+
+multiIndexIpStringTsLong
+required_capability: union_types
+required_capability: metadata_fields
+
+FROM sample_data* METADATA _index
+| EVAL @timestamp = TO_DATETIME(@timestamp), client_ip = TO_IP(client_ip)
+| KEEP _index, @timestamp, client_ip, event_duration, message
+| SORT _index ASC, @timestamp DESC
+;
+
+_index:keyword | @timestamp:date | client_ip:ip | event_duration:long | message:keyword
+sample_data | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1
+sample_data | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error
+sample_data | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error
+sample_data | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error
+sample_data | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected
+sample_data | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2
+sample_data | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3
+sample_data_str | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1
+sample_data_str | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error
+sample_data_str | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error
+sample_data_str | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error
+sample_data_str | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected
+sample_data_str | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2
+sample_data_str | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3
+sample_data_ts_long | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1
+sample_data_ts_long | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error
+sample_data_ts_long | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error
+sample_data_ts_long | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error
+sample_data_ts_long | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected
+sample_data_ts_long | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2
+sample_data_ts_long | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3
+;
+
+multiIndexIpStringTsLongDropped
+required_capability: union_types
+required_capability: metadata_fields
+
+FROM sample_data* METADATA _index
+| EVAL @timestamp = TO_DATETIME(@timestamp), client_ip = TO_IP(client_ip)
+| KEEP _index, event_duration, message
+| SORT _index ASC, event_duration ASC
+;
+
+_index:keyword | event_duration:long | message:keyword
+sample_data | 725448 | Connection error
+sample_data | 1232382 | Disconnected
+sample_data | 1756467 | Connected to 10.1.0.1
+sample_data | 2764889 | Connected to 10.1.0.2
+sample_data | 3450233 | Connected to 10.1.0.3
+sample_data | 5033755 | Connection error
+sample_data | 8268153 | Connection error
+sample_data_str | 725448 | Connection error
+sample_data_str | 1232382 | Disconnected
+sample_data_str | 1756467 | Connected to 10.1.0.1
+sample_data_str | 2764889 | Connected to 10.1.0.2
+sample_data_str | 3450233 | Connected to 10.1.0.3
+sample_data_str | 5033755 | Connection error
+sample_data_str | 8268153 | Connection error
+sample_data_ts_long | 725448 | Connection error
+sample_data_ts_long | 1232382 | Disconnected
+sample_data_ts_long | 1756467 | Connected to 10.1.0.1
+sample_data_ts_long | 2764889 | Connected to 10.1.0.2
+sample_data_ts_long | 3450233 | Connected to 10.1.0.3
+sample_data_ts_long | 5033755 | Connection error
+sample_data_ts_long | 8268153 | Connection error
+;
+
+multiIndexIpStringTsLongRename
+required_capability: union_types
+required_capability: metadata_fields + +FROM sample_data* METADATA _index +| EVAL ts = TO_DATETIME(@timestamp), host_ip = TO_IP(client_ip) +| KEEP _index, ts, host_ip, event_duration, message +| SORT _index ASC, ts DESC +; + +_index:keyword | ts:date | host_ip:ip | event_duration:long | message:keyword +sample_data | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 +sample_data | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error +sample_data | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error +sample_data | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error +sample_data | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected +sample_data | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 +sample_data | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 +sample_data_str | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 +sample_data_str | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error +sample_data_str | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error +sample_data_str | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error +sample_data_str | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected +sample_data_str | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 +sample_data_str | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 +sample_data_ts_long | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 +sample_data_ts_long | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error +sample_data_ts_long | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error +sample_data_ts_long | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error +sample_data_ts_long | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected +sample_data_ts_long | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 +sample_data_ts_long | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 +; + +multiIndexIpStringTsLongRenameDropped +required_capability: union_types +required_capability: metadata_fields + +FROM sample_data* METADATA _index +| EVAL ts = TO_DATETIME(@timestamp), host_ip = TO_IP(client_ip) +| KEEP _index, event_duration, message +| SORT _index ASC, event_duration ASC +; + +_index:keyword | event_duration:long | message:keyword +sample_data | 725448 | Connection error +sample_data | 1232382 | Disconnected +sample_data | 1756467 | Connected to 10.1.0.1 +sample_data | 2764889 | Connected to 10.1.0.2 +sample_data | 3450233 | Connected to 10.1.0.3 +sample_data | 5033755 | Connection error +sample_data | 8268153 | Connection error +sample_data_str | 725448 | Connection error +sample_data_str | 1232382 | Disconnected +sample_data_str | 1756467 | Connected to 10.1.0.1 +sample_data_str | 2764889 | Connected to 10.1.0.2 +sample_data_str | 3450233 | Connected to 10.1.0.3 +sample_data_str | 5033755 | Connection error +sample_data_str | 8268153 | Connection error +sample_data_ts_long | 725448 | Connection error +sample_data_ts_long | 1232382 | Disconnected +sample_data_ts_long | 1756467 | Connected to 10.1.0.1 +sample_data_ts_long | 2764889 | Connected to 10.1.0.2 +sample_data_ts_long | 3450233 | Connected to 10.1.0.3 +sample_data_ts_long | 5033755 | Connection error +sample_data_ts_long | 8268153 | Connection error +; + 
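The csv-spec cases above all follow the same recipe: query several indices whose `@timestamp` or `client_ip` mappings disagree, and apply an explicit conversion function so the union-typed field collapses to a single column type. As a quick way to try the same pattern outside the spec runner, here is a minimal sketch that posts an equivalent ES|QL query to the `_query` endpoint (the endpoint is the one exercised by the REST tests later in this patch; the localhost address, lack of security, and index names are illustrative assumptions, not part of the change):

```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class UnionTypesQuerySketch {
    public static void main(String[] args) throws Exception {
        // TO_DATETIME collapses the date/long union of @timestamp into a single DATETIME column,
        // mirroring the multiIndex* csv-spec cases above.
        String esql = "FROM sample_data* | EVAL ts = TO_DATETIME(@timestamp) | KEEP ts, client_ip, message | LIMIT 5";
        String body = "{\"query\": \"" + esql.replace("\"", "\\\"") + "\"}";

        HttpRequest request = HttpRequest.newBuilder()
            .uri(URI.create("http://localhost:9200/_query")) // assumes a local, unsecured dev cluster
            .header("Content-Type", "application/json")
            .POST(HttpRequest.BodyPublishers.ofString(body))
            .build();

        HttpResponse<String> response = HttpClient.newHttpClient()
            .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.body()); // columns/values JSON, analogous to the expected tables above
    }
}
```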
+multiIndexIpStringTsLongRenameToString +required_capability: union_types +required_capability: metadata_fields + +FROM sample_data* METADATA _index +| EVAL ts = TO_STRING(TO_DATETIME(@timestamp)), host_ip = TO_STRING(TO_IP(client_ip)) +| KEEP _index, ts, host_ip, event_duration, message +| SORT _index ASC, ts DESC +; + +_index:keyword | ts:keyword | host_ip:keyword | event_duration:long | message:keyword +sample_data | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 +sample_data | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error +sample_data | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error +sample_data | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error +sample_data | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected +sample_data | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 +sample_data | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 +sample_data_str | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 +sample_data_str | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error +sample_data_str | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error +sample_data_str | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error +sample_data_str | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected +sample_data_str | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 +sample_data_str | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 +sample_data_ts_long | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 +sample_data_ts_long | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error +sample_data_ts_long | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error +sample_data_ts_long | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error +sample_data_ts_long | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected +sample_data_ts_long | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 +sample_data_ts_long | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 +; + +multiIndexWhereIpStringTsLong +required_capability: union_types +required_capability: metadata_fields + +FROM sample_data* METADATA _index +| WHERE TO_LONG(@timestamp) < 1698068014937 AND TO_STRING(client_ip) == "172.21.2.162" +| KEEP _index, event_duration, message +| SORT _index ASC, message ASC +; + +_index:keyword | event_duration:long | message:keyword +sample_data | 3450233 | Connected to 10.1.0.3 +sample_data_str | 3450233 | Connected to 10.1.0.3 +sample_data_ts_long | 3450233 | Connected to 10.1.0.3 +; + +multiIndexWhereIpStringTsLongStats +required_capability: union_types + +FROM sample_data* +| WHERE TO_LONG(@timestamp) < 1698068014937 AND TO_STRING(client_ip) == "172.21.2.162" +| STATS count=count(*) BY message +| SORT count DESC, message ASC +| KEEP count, message +; + +count:long | message:keyword +3 | Connected to 10.1.0.3 +; + +multiIndexWhereIpStringLikeTsLong +required_capability: union_types +required_capability: metadata_fields + +FROM sample_data* METADATA _index +| WHERE TO_LONG(@timestamp) < 1698068014937 AND TO_STRING(client_ip) LIKE "172.21.2.16?" 
+| KEEP _index, event_duration, message
+| SORT _index ASC, message ASC
+;
+
+_index:keyword | event_duration:long | message:keyword
+sample_data | 3450233 | Connected to 10.1.0.3
+sample_data_str | 3450233 | Connected to 10.1.0.3
+sample_data_ts_long | 3450233 | Connected to 10.1.0.3
+;
+
+multiIndexWhereIpStringLikeTsLongStats
+required_capability: union_types
+
+FROM sample_data*
+| WHERE TO_LONG(@timestamp) < 1698068014937 AND TO_STRING(client_ip) LIKE "172.21.2.16?"
+| STATS count=count(*) BY message
+| SORT count DESC, message ASC
+| KEEP count, message
+;
+
+count:long | message:keyword
+3 | Connected to 10.1.0.3
+;
+
+multiIndexMultiColumnTypesRename
+required_capability: union_types
+required_capability: metadata_fields
+
+FROM sample_data* METADATA _index
+| WHERE event_duration > 8000000
+| EVAL ts = TO_DATETIME(@timestamp), ts_str = TO_STRING(@timestamp), ts_l = TO_LONG(@timestamp), ip = TO_IP(client_ip), ip_str = TO_STRING(client_ip)
+| SORT _index ASC, ts DESC
+;
+
+@timestamp:null | client_ip:null | event_duration:long | message:keyword | _index:keyword | ts:date | ts_str:keyword | ts_l:long | ip:ip | ip_str:keyword
+null | null | 8268153 | Connection error | sample_data | 2023-10-23T13:52:55.015Z | 2023-10-23T13:52:55.015Z | 1698069175015 | 172.21.3.15 | 172.21.3.15
+null | null | 8268153 | Connection error | sample_data_str | 2023-10-23T13:52:55.015Z | 2023-10-23T13:52:55.015Z | 1698069175015 | 172.21.3.15 | 172.21.3.15
+null | null | 8268153 | Connection error | sample_data_ts_long | 2023-10-23T13:52:55.015Z | 1698069175015 | 1698069175015 | 172.21.3.15 | 172.21.3.15
+;
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java
index e65f574422dd5..654c1ffd8a5e9 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java
@@ -77,6 +77,11 @@ public class EsqlCapabilities {
     */
    public static final String STRING_LITERAL_AUTO_CASTING_TO_DATETIME_ADD_SUB = "string_literal_auto_casting_to_datetime_add_sub";

+    /**
+     * Support multiple field mappings if an appropriate conversion function is used (union types)
+     */
+    public static final String UNION_TYPES = "union_types";
+
    /**
     * Support for named or positional parameters in EsqlQueryRequest. 
*/ @@ -94,6 +99,7 @@ private static Set capabilities() { caps.add(METADATA_IGNORED_FIELD); caps.add(FN_MV_APPEND); caps.add(REPEAT); + caps.add(UNION_TYPES); caps.add(NAMED_POSITIONAL_PARAMETER); if (Build.current().isSnapshot()) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java index 70fbe17a7d470..77a51c8415545 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java @@ -27,6 +27,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; import org.elasticsearch.xpack.esql.core.expression.Literal; +import org.elasticsearch.xpack.esql.core.expression.NameId; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; import org.elasticsearch.xpack.esql.core.expression.Nullability; import org.elasticsearch.xpack.esql.core.expression.ReferenceAttribute; @@ -59,6 +60,7 @@ import org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry; import org.elasticsearch.xpack.esql.expression.function.UnsupportedAttribute; import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.AbstractConvertFunction; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.DateTimeArithmeticOperation; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.EsqlArithmeticOperation; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.In; @@ -80,11 +82,13 @@ import org.elasticsearch.xpack.esql.stats.FeatureMetric; import org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; +import org.elasticsearch.xpack.esql.type.MultiTypeEsField; import java.util.ArrayList; import java.util.Arrays; import java.util.BitSet; import java.util.Collection; +import java.util.Collections; import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; @@ -132,8 +136,13 @@ public class Analyzer extends ParameterizedRuleExecutor("Resolution", new ResolveRefs(), new ImplicitCasting()); - var finish = new Batch<>("Finish Analysis", Limiter.ONCE, new AddImplicitLimit()); + var resolution = new Batch<>( + "Resolution", + new ResolveRefs(), + new ResolveUnionTypes(), // Must be after ResolveRefs, so union types can be found + new ImplicitCasting() + ); + var finish = new Batch<>("Finish Analysis", Limiter.ONCE, new AddImplicitLimit(), new UnresolveUnionTypes()); rules = List.of(init, resolution, finish); } @@ -851,14 +860,6 @@ private static List potentialCandidatesIfNoMatchesFound( } private static Attribute handleSpecialFields(UnresolvedAttribute u, Attribute named) { - if (named instanceof FieldAttribute fa) { - // incompatible mappings - var field = fa.field(); - if (field instanceof InvalidMappedField imf) { - named = u.withUnresolvedMessage("Cannot use field [" + fa.name() + "] due to ambiguities being " + imf.errorMessage()); - } - } - return named.withLocation(u.source()); } @@ -1061,4 +1062,155 @@ public static Expression castStringLiteral(Expression from, DataType target) { } } } + + /** + * The EsqlIndexResolver will create InvalidMappedField instances for fields that are ambiguous (i.e. have multiple mappings). 
+ * During ResolveRefs we do not convert these to UnresolvedAttribute instances, as we want to first determine if they can + * instead be handled by conversion functions within the query. This rule looks for matching conversion functions and converts + * those fields into MultiTypeEsField, which encapsulates the knowledge of how to convert these into a single type. + * This knowledge will be used later in generating the FieldExtractExec with built-in type conversion. + * Any fields which could not be resolved by conversion functions will be converted to UnresolvedAttribute instances in a later rule + * (See UnresolveUnionTypes below). + */ + private static class ResolveUnionTypes extends BaseAnalyzerRule { + + record TypeResolutionKey(String fieldName, DataType fieldType) {} + + @Override + protected LogicalPlan doRule(LogicalPlan plan) { + List unionFieldAttributes = new ArrayList<>(); + // See if the eval function has an unresolved MultiTypeEsField field + // Replace the entire convert function with a new FieldAttribute (containing type conversion knowledge) + plan = plan.transformExpressionsOnly( + AbstractConvertFunction.class, + convert -> resolveConvertFunction(convert, unionFieldAttributes) + ); + // If no union fields were generated, return the plan as is + if (unionFieldAttributes.isEmpty()) { + return plan; + } + + // Otherwise drop the converted attributes after the alias function, as they are only needed for this function, and + // the original version of the attribute should still be seen as unconverted. + plan = dropConvertedAttributes(plan, unionFieldAttributes); + + // And add generated fields to EsRelation, so these new attributes will appear in the OutputExec of the Fragment + // and thereby get used in FieldExtractExec + plan = plan.transformDown(EsRelation.class, esr -> { + List output = esr.output(); + List missing = new ArrayList<>(); + for (FieldAttribute fa : unionFieldAttributes) { + if (output.stream().noneMatch(a -> a.id().equals(fa.id()))) { + missing.add(fa); + } + } + if (missing.isEmpty() == false) { + output.addAll(missing); + return new EsRelation(esr.source(), esr.index(), output, esr.indexMode(), esr.frozen()); + } + return esr; + }); + return plan; + } + + private LogicalPlan dropConvertedAttributes(LogicalPlan plan, List unionFieldAttributes) { + List projections = new ArrayList<>(plan.output()); + for (var e : unionFieldAttributes) { + projections.removeIf(p -> p.id().equals(e.id())); + } + if (projections.size() != plan.output().size()) { + return new EsqlProject(plan.source(), plan, projections); + } + return plan; + } + + private Expression resolveConvertFunction(AbstractConvertFunction convert, List unionFieldAttributes) { + if (convert.field() instanceof FieldAttribute fa && fa.field() instanceof InvalidMappedField imf) { + HashMap typeResolutions = new HashMap<>(); + Set supportedTypes = convert.supportedTypes(); + imf.getTypesToIndices().keySet().forEach(typeName -> { + DataType type = DataType.fromTypeName(typeName); + if (supportedTypes.contains(type)) { + TypeResolutionKey key = new TypeResolutionKey(fa.name(), type); + var concreteConvert = typeSpecificConvert(convert, fa.source(), type, imf); + typeResolutions.put(key, concreteConvert); + } + }); + // If all mapped types were resolved, create a new FieldAttribute with the resolved MultiTypeEsField + if (typeResolutions.size() == imf.getTypesToIndices().size()) { + var resolvedField = resolvedMultiTypeEsField(fa, typeResolutions); + return createIfDoesNotAlreadyExist(fa, resolvedField, 
unionFieldAttributes); + } + } else if (convert.field() instanceof AbstractConvertFunction subConvert) { + return convert.replaceChildren(Collections.singletonList(resolveConvertFunction(subConvert, unionFieldAttributes))); + } + return convert; + } + + private Expression createIfDoesNotAlreadyExist( + FieldAttribute fa, + MultiTypeEsField resolvedField, + List unionFieldAttributes + ) { + var unionFieldAttribute = new FieldAttribute(fa.source(), fa.name(), resolvedField); // Generates new ID for the field + int existingIndex = unionFieldAttributes.indexOf(unionFieldAttribute); + if (existingIndex >= 0) { + // Do not generate multiple name/type combinations with different IDs + return unionFieldAttributes.get(existingIndex); + } else { + unionFieldAttributes.add(unionFieldAttribute); + return unionFieldAttribute; + } + } + + private MultiTypeEsField resolvedMultiTypeEsField(FieldAttribute fa, HashMap typeResolutions) { + Map typesToConversionExpressions = new HashMap<>(); + InvalidMappedField imf = (InvalidMappedField) fa.field(); + imf.getTypesToIndices().forEach((typeName, indexNames) -> { + DataType type = DataType.fromTypeName(typeName); + TypeResolutionKey key = new TypeResolutionKey(fa.name(), type); + if (typeResolutions.containsKey(key)) { + typesToConversionExpressions.put(typeName, typeResolutions.get(key)); + } + }); + return MultiTypeEsField.resolveFrom(imf, typesToConversionExpressions); + } + + private Expression typeSpecificConvert(AbstractConvertFunction convert, Source source, DataType type, InvalidMappedField mtf) { + EsField field = new EsField(mtf.getName(), type, mtf.getProperties(), mtf.isAggregatable()); + NameId id = ((FieldAttribute) convert.field()).id(); + FieldAttribute resolvedAttr = new FieldAttribute(source, null, field.getName(), field, null, Nullability.TRUE, id, false); + return convert.replaceChildren(Collections.singletonList(resolvedAttr)); + } + } + + /** + * If there was no AbstractConvertFunction that resolved multi-type fields in the ResolveUnionTypes rules, + * then there could still be some FieldAttributes that contain unresolved MultiTypeEsFields. + * These need to be converted back to actual UnresolvedAttribute in order for validation to generate appropriate failures. 
+ */ + private static class UnresolveUnionTypes extends AnalyzerRules.AnalyzerRule { + @Override + protected boolean skipResolved() { + return false; + } + + @Override + protected LogicalPlan rule(LogicalPlan plan) { + if (plan instanceof EsRelation esRelation) { + // Leave esRelation as InvalidMappedField so that UNSUPPORTED fields can still pass through + return esRelation; + } + return plan.transformExpressionsOnly(FieldAttribute.class, UnresolveUnionTypes::checkUnresolved); + } + + private static Attribute checkUnresolved(FieldAttribute fa) { + var field = fa.field(); + if (field instanceof InvalidMappedField imf) { + String unresolvedMessage = "Cannot use field [" + fa.name() + "] due to ambiguities being " + imf.errorMessage(); + return new UnresolvedAttribute(fa.source(), fa.name(), fa.qualifier(), fa.id(), unresolvedMessage, null); + } + return fa; + } + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/AbstractConvertFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/AbstractConvertFunction.java index 2496d8b82fa6f..96601905d40c9 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/AbstractConvertFunction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/AbstractConvertFunction.java @@ -77,7 +77,11 @@ protected final TypeResolution resolveType() { if (childrenResolved() == false) { return new TypeResolution("Unresolved children"); } - return isType(field(), factories()::containsKey, sourceText(), null, supportedTypesNames(factories().keySet())); + return isType(field(), factories()::containsKey, sourceText(), null, supportedTypesNames(supportedTypes())); + } + + public Set supportedTypes() { + return factories().keySet(); } public static String supportedTypesNames(Set types) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/EsRelation.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/EsRelation.java index b7e4fc9ae622f..08916c14e91bf 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/EsRelation.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/EsRelation.java @@ -92,6 +92,8 @@ public List output() { @Override public boolean expressionsResolved() { + // For unresolved expressions to exist in EsRelation is fine, as long as they are not used in later operations + // This allows for them to be converted to null@unsupported fields in final output, an important feature of ES|QL return true; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java index 04ed433200c2f..fdba785f668d7 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java @@ -7,20 +7,28 @@ package org.elasticsearch.xpack.esql.planner; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; 
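Before the extraction-layer changes below, it is worth making the resolution rule above concrete: for each conversion function applied to an InvalidMappedField, ResolveUnionTypes checks every native type recorded in the field's types-to-indices map against the function's supported source types, and only when every type is convertible does it build the per-index conversion map that MultiTypeEsField later carries to the data nodes; otherwise the field falls back to UnresolvedAttribute, as UnresolveUnionTypes describes. A self-contained toy model of that decision is sketched here; the plain strings stand in for the real DataType, Expression, and EsField classes, so this is an illustrative simplification rather than the actual implementation:

```java
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import java.util.Set;

public class UnionTypeResolutionModel {
    /** Returns an index->conversion map when every native type is convertible, empty otherwise. */
    static Optional<Map<String, String>> resolve(Map<String, Set<String>> typesToIndices, Set<String> supportedTypes) {
        Map<String, String> indexToConversion = new HashMap<>();
        for (Map.Entry<String, Set<String>> e : typesToIndices.entrySet()) {
            if (supportedTypes.contains(e.getKey()) == false) {
                // One unconvertible type means the field stays unresolved and surfaces the ambiguity error.
                return Optional.empty();
            }
            // One conversion per native type, fanned out to every index mapped with that type.
            for (String index : e.getValue()) {
                indexToConversion.put(index, "convert(" + e.getKey() + " -> datetime)");
            }
        }
        return Optional.of(indexToConversion);
    }

    public static void main(String[] args) {
        // @timestamp is date in sample_data and long in sample_data_ts_long, as in the csv-spec fixtures.
        Map<String, Set<String>> typesToIndices = Map.of(
            "datetime", Set.of("sample_data"),
            "long", Set.of("sample_data_ts_long")
        );
        System.out.println(resolve(typesToIndices, Set.of("datetime", "long", "keyword"))); // resolved
        System.out.println(resolve(typesToIndices, Set.of("ip")));                          // unresolved
    }
}
```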
+import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.breaker.NoopCircuitBreaker; import org.elasticsearch.common.logging.HeaderWarning; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.aggregation.GroupingAggregator; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.lucene.LuceneCountOperator; import org.elasticsearch.compute.lucene.LuceneOperator; import org.elasticsearch.compute.lucene.LuceneSourceOperator; import org.elasticsearch.compute.lucene.LuceneTopNSourceOperator; import org.elasticsearch.compute.lucene.TimeSeriesSortedSourceOperatorFactory; import org.elasticsearch.compute.lucene.ValuesSourceReaderOperator; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; import org.elasticsearch.compute.operator.Operator; import org.elasticsearch.compute.operator.OrdinalsGroupingOperator; import org.elasticsearch.compute.operator.SourceOperator; @@ -35,13 +43,16 @@ import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.index.search.NestedHelper; +import org.elasticsearch.search.fetch.StoredFieldsSpec; import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.search.sort.SortAndFormats; import org.elasticsearch.search.sort.SortBuilder; import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.AbstractConvertFunction; import org.elasticsearch.xpack.esql.plan.physical.AggregateExec; import org.elasticsearch.xpack.esql.plan.physical.EsQueryExec; import org.elasticsearch.xpack.esql.plan.physical.EsQueryExec.FieldSort; @@ -50,6 +61,7 @@ import org.elasticsearch.xpack.esql.planner.LocalExecutionPlanner.LocalExecutionPlannerContext; import org.elasticsearch.xpack.esql.planner.LocalExecutionPlanner.PhysicalOperation; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; +import org.elasticsearch.xpack.esql.type.MultiTypeEsField; import java.io.IOException; import java.util.ArrayList; @@ -102,17 +114,42 @@ public final PhysicalOperation fieldExtractPhysicalOperation(FieldExtractExec fi var docValuesAttrs = fieldExtractExec.docValuesAttributes(); for (Attribute attr : fieldExtractExec.attributesToExtract()) { layout.append(attr); + var unionTypes = findUnionTypes(attr); DataType dataType = attr.dataType(); MappedFieldType.FieldExtractPreference fieldExtractPreference = PlannerUtils.extractPreference(docValuesAttrs.contains(attr)); ElementType elementType = PlannerUtils.toElementType(dataType, fieldExtractPreference); String fieldName = attr.name(); boolean isUnsupported = EsqlDataTypes.isUnsupported(dataType); - IntFunction loader = s -> shardContexts.get(s).blockLoader(fieldName, isUnsupported, fieldExtractPreference); + IntFunction loader = s -> getBlockLoaderFor(s, fieldName, isUnsupported, fieldExtractPreference, unionTypes); fields.add(new ValuesSourceReaderOperator.FieldInfo(fieldName, elementType, loader)); } return source.with(new ValuesSourceReaderOperator.Factory(fields, readers, docChannel), layout.build()); } + private 
BlockLoader getBlockLoaderFor(
+        int shardId,
+        String fieldName,
+        boolean isUnsupported,
+        MappedFieldType.FieldExtractPreference fieldExtractPreference,
+        MultiTypeEsField unionTypes
+    ) {
+        DefaultShardContext shardContext = (DefaultShardContext) shardContexts.get(shardId);
+        BlockLoader blockLoader = shardContext.blockLoader(fieldName, isUnsupported, fieldExtractPreference);
+        if (unionTypes != null) {
+            String indexName = shardContext.ctx.index().getName();
+            Expression conversion = unionTypes.getConversionExpressionForIndex(indexName);
+            return new TypeConvertingBlockLoader(blockLoader, (AbstractConvertFunction) conversion);
+        }
+        return blockLoader;
+    }
+
+    private MultiTypeEsField findUnionTypes(Attribute attr) {
+        if (attr instanceof FieldAttribute fa && fa.field() instanceof MultiTypeEsField multiTypeEsField) {
+            return multiTypeEsField;
+        }
+        return null;
+    }
+
     public Function querySupplier(QueryBuilder builder) {
         QueryBuilder qb = builder == null ? QueryBuilders.matchAllQuery() : builder;
         return ctx -> shardContexts.get(ctx.index()).toQuery(qb);
@@ -321,4 +358,96 @@ public FieldNamesFieldMapper.FieldNamesFieldType fieldNames() {
         return loader;
     }
 }
+
+    static class TypeConvertingBlockLoader implements BlockLoader {
+        protected final BlockLoader delegate;
+        private final EvalOperator.ExpressionEvaluator convertEvaluator;
+
+        protected TypeConvertingBlockLoader(BlockLoader delegate, AbstractConvertFunction convertFunction) {
+            this.delegate = delegate;
+            DriverContext driverContext1 = new DriverContext(
+                BigArrays.NON_RECYCLING_INSTANCE,
+                new org.elasticsearch.compute.data.BlockFactory(
+                    new NoopCircuitBreaker(CircuitBreaker.REQUEST),
+                    BigArrays.NON_RECYCLING_INSTANCE
+                )
+            );
+            this.convertEvaluator = convertFunction.toEvaluator(e -> driverContext -> new EvalOperator.ExpressionEvaluator() {
+                @Override
+                public org.elasticsearch.compute.data.Block eval(Page page) {
+                    // This is a pass-through evaluator, since it sits directly on the source loading (no prior expressions)
+                    return page.getBlock(0);
+                }
+
+                @Override
+                public void close() {}
+            }).get(driverContext1);
+        }
+
+        @Override
+        public Builder builder(BlockFactory factory, int expectedCount) {
+            // Return the delegate's builder, which can build the original mapped type, before conversion
+            return delegate.builder(factory, expectedCount);
+        }
+
+        @Override
+        public Block convert(Block block) {
+            Page page = new Page((org.elasticsearch.compute.data.Block) block);
+            return convertEvaluator.eval(page);
+        }
+
+        @Override
+        public ColumnAtATimeReader columnAtATimeReader(LeafReaderContext context) throws IOException {
+            ColumnAtATimeReader reader = delegate.columnAtATimeReader(context);
+            if (reader == null) {
+                return null;
+            }
+            return new ColumnAtATimeReader() {
+                @Override
+                public Block read(BlockFactory factory, Docs docs) throws IOException {
+                    Block block = reader.read(factory, docs);
+                    Page page = new Page((org.elasticsearch.compute.data.Block) block);
+                    org.elasticsearch.compute.data.Block converted = convertEvaluator.eval(page);
+                    return converted;
+                }
+
+                @Override
+                public boolean canReuse(int startingDocID) {
+                    return reader.canReuse(startingDocID);
+                }
+
+                @Override
+                public String toString() {
+                    return reader.toString();
+                }
+            };
+        }
+
+        @Override
+        public RowStrideReader rowStrideReader(LeafReaderContext context) throws IOException {
+            // We do no type conversion here, since that will be done in the ValuesSourceReaderOperator for row-stride cases
+            // Using the BlockLoader.convert(Block) function defined above
+
return delegate.rowStrideReader(context); + } + + @Override + public StoredFieldsSpec rowStrideStoredFieldSpec() { + return delegate.rowStrideStoredFieldSpec(); + } + + @Override + public boolean supportsOrdinals() { + return delegate.supportsOrdinals(); + } + + @Override + public SortedSetDocValues ordinals(LeafReaderContext context) throws IOException { + return delegate.ordinals(context); + } + + @Override + public final String toString() { + return "TypeConvertingBlockLoader[delegate=" + delegate + ", convertEvaluator=" + convertEvaluator + "]"; + } + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java index d3b2d5c6e7646..fc00f5be22624 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java @@ -64,6 +64,7 @@ import org.elasticsearch.xpack.esql.querydsl.query.SingleValueQuery; import org.elasticsearch.xpack.esql.session.IndexResolver; import org.elasticsearch.xpack.esql.type.EsqlDataTypeRegistry; +import org.elasticsearch.xpack.esql.type.MultiTypeEsField; import java.lang.invoke.MethodHandles; import java.util.ArrayList; @@ -188,6 +189,7 @@ public List getNamedWriteables() { entries.add(UnsupportedAttribute.ENTRY); // TODO combine with above once these are in the same project entries.addAll(NamedExpression.getNamedWriteables()); entries.add(UnsupportedAttribute.NAMED_EXPRESSION_ENTRY); // TODO combine with above once these are in the same project + entries.add(MultiTypeEsField.ENTRY); // TODO combine with EsField.getNamedWriteables() once these are in the same module return entries; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/IndexResolver.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/IndexResolver.java index 983a45f36169e..5fd7f0c230463 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/IndexResolver.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/IndexResolver.java @@ -225,26 +225,10 @@ private EsField conflictingTypes(String name, String fullName, FieldCapabilities if (type == UNSUPPORTED) { return unsupported(name, fc); } - typesToIndices.computeIfAbsent(type.esType(), _key -> new TreeSet<>()).add(ir.getIndexName()); + typesToIndices.computeIfAbsent(type.typeName(), _key -> new TreeSet<>()).add(ir.getIndexName()); } } - StringBuilder errorMessage = new StringBuilder(); - errorMessage.append("mapped as ["); - errorMessage.append(typesToIndices.size()); - errorMessage.append("] incompatible types: "); - boolean first = true; - for (Map.Entry> e : typesToIndices.entrySet()) { - if (first) { - first = false; - } else { - errorMessage.append(", "); - } - errorMessage.append("["); - errorMessage.append(e.getKey()); - errorMessage.append("] in "); - errorMessage.append(e.getValue()); - } - return new InvalidMappedField(name, errorMessage.toString()); + return new InvalidMappedField(name, typesToIndices); } private EsField conflictingMetricTypes(String name, String fullName, FieldCapabilitiesResponse fieldCapsResponse) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/MultiTypeEsField.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/MultiTypeEsField.java new file mode 100644 index 0000000000000..2b963e7428e2b --- /dev/null +++ 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/MultiTypeEsField.java
@@ -0,0 +1,116 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.type;
+
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.xpack.esql.core.expression.Expression;
+import org.elasticsearch.xpack.esql.core.type.DataType;
+import org.elasticsearch.xpack.esql.core.type.EsField;
+import org.elasticsearch.xpack.esql.core.type.InvalidMappedField;
+import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
+
+/**
+ * During IndexResolution it can occur that the same field is mapped to different types in different indices.
+ * The class InvalidMappedField holds that information and allows for later resolution of the field
+ * to a single type during LogicalPlanOptimization.
+ * If the plan contains conversion expressions for the different types, the resolution will be done using the conversion expressions,
+ * in which case a MultiTypeEsField will be created to encapsulate the type resolution capabilities.
+ * This class can be communicated to the data nodes and used during physical planning to influence field extraction so that
+ * type conversion is done at the data node level.
+ */
+public class MultiTypeEsField extends EsField {
+    public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(
+        EsField.class,
+        "MultiTypeEsField",
+        MultiTypeEsField::new
+    );
+
+    private final Map indexToConversionExpressions;
+
+    public MultiTypeEsField(String name, DataType dataType, boolean aggregatable, Map indexToConversionExpressions) {
+        super(name, dataType, Map.of(), aggregatable);
+        this.indexToConversionExpressions = indexToConversionExpressions;
+    }
+
+    public MultiTypeEsField(StreamInput in) throws IOException {
+        // TODO: Change the conversion expression serialization to i.readNamedWriteable(Expression.class) once Expression is fully supported
+        this(in.readString(), DataType.readFrom(in), in.readBoolean(), in.readImmutableMap(i -> ((PlanStreamInput) i).readExpression()));
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        out.writeString(getName());
+        out.writeString(getDataType().typeName());
+        out.writeBoolean(isAggregatable());
+        out.writeMap(getIndexToConversionExpressions(), (o, v) -> out.writeNamedWriteable(v));
+    }
+
+    @Override
+    public String getWriteableName() {
+        return ENTRY.name;
+    }
+
+    public Map getIndexToConversionExpressions() {
+        return indexToConversionExpressions;
+    }
+
+    public Expression getConversionExpressionForIndex(String indexName) {
+        return indexToConversionExpressions.get(indexName);
+    }
+
+    public static MultiTypeEsField resolveFrom(
+        InvalidMappedField invalidMappedField,
+        Map typesToConversionExpressions
+    ) {
+        Map> typesToIndices = invalidMappedField.getTypesToIndices();
+        DataType resolvedDataType = DataType.UNSUPPORTED;
+        Map indexToConversionExpressions = new HashMap<>();
+        for (String typeName : typesToIndices.keySet()) {
+            Set indices =
typesToIndices.get(typeName); + Expression convertExpr = typesToConversionExpressions.get(typeName); + if (resolvedDataType == DataType.UNSUPPORTED) { + resolvedDataType = convertExpr.dataType(); + } else if (resolvedDataType != convertExpr.dataType()) { + throw new IllegalArgumentException("Resolved data type mismatch: " + resolvedDataType + " != " + convertExpr.dataType()); + } + for (String indexName : indices) { + indexToConversionExpressions.put(indexName, convertExpr); + } + } + return new MultiTypeEsField(invalidMappedField.getName(), resolvedDataType, false, indexToConversionExpressions); + } + + @Override + public boolean equals(Object obj) { + if (super.equals(obj) == false) { + return false; + } + if (obj instanceof MultiTypeEsField other) { + return super.equals(other) && indexToConversionExpressions.equals(other.indexToConversionExpressions); + } + return false; + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), indexToConversionExpressions); + } + + @Override + public String toString() { + return super.toString() + " (" + indexToConversionExpressions + ")"; + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java index 44466cebb7dac..27aa985efd6d0 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java @@ -222,6 +222,14 @@ public CsvTests(String fileName, String groupName, String testName, Integer line public final void test() throws Throwable { try { assumeTrue("Test " + testName + " is not enabled", isEnabled(testName, Version.CURRENT)); + /* + * The csv tests support all but a few features. The unsupported features + * are tested in integration tests. + */ + assumeFalse("metadata fields aren't supported", testCase.requiredCapabilities.contains(cap(EsqlFeatures.METADATA_FIELDS))); + assumeFalse("enrich can't load fields in csv tests", testCase.requiredCapabilities.contains(cap(EsqlFeatures.ENRICH_LOAD))); + assumeFalse("can't load metrics in csv tests", testCase.requiredCapabilities.contains(cap(EsqlFeatures.METRICS_SYNTAX))); + assumeFalse("multiple indices aren't supported", testCase.requiredCapabilities.contains(EsqlCapabilities.UNION_TYPES)); if (Build.current().isSnapshot()) { assertThat( @@ -231,14 +239,6 @@ public final void test() throws Throwable { ); } - /* - * The csv tests support all but a few features. The unsupported features - * are tested in integration tests. - */ - assumeFalse("metadata fields aren't supported", testCase.requiredCapabilities.contains(cap(EsqlFeatures.METADATA_FIELDS))); - assumeFalse("enrich can't load fields in csv tests", testCase.requiredCapabilities.contains(cap(EsqlFeatures.ENRICH_LOAD))); - assumeFalse("can't load metrics in csv tests", testCase.requiredCapabilities.contains(cap(EsqlFeatures.METRICS_SYNTAX))); - doTest(); } catch (Throwable th) { throw reworkException(th); @@ -334,7 +334,7 @@ private PhysicalPlan physicalPlan(LogicalPlan parsed, CsvTestsDataLoader.TestsDa private static CsvTestsDataLoader.TestsDataset testsDataset(LogicalPlan parsed) { var preAnalysis = new PreAnalyzer().preAnalyze(parsed); var indices = preAnalysis.indices; - if (indices.size() == 0) { + if (indices.isEmpty()) { /* * If the data set doesn't matter we'll just grab one we know works. * Employees is fine. 
@@ -345,11 +345,23 @@ private static CsvTestsDataLoader.TestsDataset testsDataset(LogicalPlan parsed) } String indexName = indices.get(0).id().index(); - var dataset = CSV_DATASET_MAP.get(indexName); - if (dataset == null) { + List datasets = new ArrayList<>(); + if (indexName.endsWith("*")) { + String indexPrefix = indexName.substring(0, indexName.length() - 1); + for (var entry : CSV_DATASET_MAP.entrySet()) { + if (entry.getKey().startsWith(indexPrefix)) { + datasets.add(entry.getValue()); + } + } + } else { + var dataset = CSV_DATASET_MAP.get(indexName); + datasets.add(dataset); + } + if (datasets.isEmpty()) { throw new IllegalArgumentException("unknown CSV dataset for table [" + indexName + "]"); } - return dataset; + // TODO: Support multiple datasets + return datasets.get(0); } private static TestPhysicalOperationProviders testOperationProviders(CsvTestsDataLoader.TestsDataset dataset) throws Exception { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/MultiTypeEsFieldTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/MultiTypeEsFieldTests.java new file mode 100644 index 0000000000000..86baee58ca53f --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/MultiTypeEsFieldTests.java @@ -0,0 +1,188 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.type; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.test.AbstractNamedWriteableTestCase; +import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.core.type.EsField; +import org.elasticsearch.xpack.esql.expression.function.scalar.UnaryScalarFunction; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToBoolean; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToCartesianPoint; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToCartesianShape; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToDatetime; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToDouble; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToGeoPoint; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToGeoShape; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToIP; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToInteger; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToLong; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToString; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToVersion; +import org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; +import org.elasticsearch.xpack.esql.session.EsqlConfiguration; +import 
org.elasticsearch.xpack.esql.session.EsqlConfigurationSerializationTests; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isString; + +/** + * This test was originally based on the tests for sub-classes of EsField, like InvalidMappedFieldTests. + * However, it has a few important differences: + *
+ * <ul>
+ * <li>It is not in the esql.core module, but in the esql module, in order to have access to the sub-classes of AbstractConvertFunction,
+ * like ToString, which are important conversion Expressions used in the union-types feature.</li>
+ * <li>It extends AbstractNamedWriteableTestCase instead of AbstractEsFieldTypeTests,
+ * in order to wrap the StreamInput with a PlanStreamInput, since Expression is not yet fully supported in the new
+ * serialization approach (NamedWritable).</li>
+ * </ul>
    + * These differences can be minimized once Expression is fully supported in the new serialization approach, and the esql and esql.core + * modules are merged, or at least the relevant classes are moved. + */ +public class MultiTypeEsFieldTests extends AbstractNamedWriteableTestCase { + + private EsqlConfiguration config; + + @Before + public void initConfig() { + config = EsqlConfigurationSerializationTests.randomConfiguration(); + } + + @Override + protected MultiTypeEsField createTestInstance() { + String name = randomAlphaOfLength(4); + boolean toString = randomBoolean(); + DataType dataType = randomFrom(types()); + DataType toType = toString ? DataType.KEYWORD : dataType; + Map indexToConvertExpressions = randomConvertExpressions(name, toString, dataType); + return new MultiTypeEsField(name, toType, false, indexToConvertExpressions); + } + + @Override + protected MultiTypeEsField mutateInstance(MultiTypeEsField instance) throws IOException { + String name = instance.getName(); + DataType dataType = instance.getDataType(); + Map indexToConvertExpressions = instance.getIndexToConversionExpressions(); + switch (between(0, 2)) { + case 0 -> name = randomAlphaOfLength(name.length() + 1); + case 1 -> dataType = randomValueOtherThan(dataType, () -> randomFrom(DataType.types())); + case 2 -> indexToConvertExpressions = mutateConvertExpressions(name, dataType, indexToConvertExpressions); + default -> throw new IllegalArgumentException(); + } + return new MultiTypeEsField(name, dataType, false, indexToConvertExpressions); + } + + @Override + protected final NamedWriteableRegistry getNamedWriteableRegistry() { + List entries = new ArrayList<>(UnaryScalarFunction.getNamedWriteables()); + entries.addAll(Attribute.getNamedWriteables()); + entries.addAll(EsField.getNamedWriteables()); + entries.add(new NamedWriteableRegistry.Entry(MultiTypeEsField.class, "MultiTypeEsField", MultiTypeEsField::new)); + return new NamedWriteableRegistry(entries); + } + + @Override + protected final Class categoryClass() { + return MultiTypeEsField.class; + } + + @Override + protected final MultiTypeEsField copyInstance(MultiTypeEsField instance, TransportVersion version) throws IOException { + return copyInstance( + instance, + getNamedWriteableRegistry(), + (out, v) -> new PlanStreamOutput(out, new PlanNameRegistry(), config).writeNamedWriteable(v), + in -> { + PlanStreamInput pin = new PlanStreamInput(in, new PlanNameRegistry(), in.namedWriteableRegistry(), config); + return pin.readNamedWriteable(MultiTypeEsField.class); + }, + version + ); + } + + private static Map randomConvertExpressions(String name, boolean toString, DataType dataType) { + Map indexToConvertExpressions = new HashMap<>(); + if (toString) { + indexToConvertExpressions.put(randomAlphaOfLength(4), new ToString(Source.EMPTY, fieldAttribute(name, dataType))); + indexToConvertExpressions.put(randomAlphaOfLength(4), new ToString(Source.EMPTY, fieldAttribute(name, DataType.KEYWORD))); + } else { + indexToConvertExpressions.put(randomAlphaOfLength(4), testConvertExpression(name, DataType.KEYWORD, dataType)); + indexToConvertExpressions.put(randomAlphaOfLength(4), testConvertExpression(name, dataType, dataType)); + } + return indexToConvertExpressions; + } + + private Map mutateConvertExpressions( + String name, + DataType toType, + Map indexToConvertExpressions + ) { + return randomValueOtherThan( + indexToConvertExpressions, + () -> randomConvertExpressions(name, toType == DataType.KEYWORD, randomFrom(types())) + ); + } + + private static List 
types() { + return List.of( + DataType.BOOLEAN, + DataType.DATETIME, + DataType.DOUBLE, + DataType.FLOAT, + DataType.INTEGER, + DataType.IP, + DataType.KEYWORD, + DataType.LONG, + DataType.GEO_POINT, + DataType.GEO_SHAPE, + DataType.CARTESIAN_POINT, + DataType.CARTESIAN_SHAPE, + DataType.VERSION + ); + } + + private static Expression testConvertExpression(String name, DataType fromType, DataType toType) { + FieldAttribute fromField = fieldAttribute(name, fromType); + if (isString(toType)) { + return new ToString(Source.EMPTY, fromField); + } else { + return switch (toType) { + case BOOLEAN -> new ToBoolean(Source.EMPTY, fromField); + case DATETIME -> new ToDatetime(Source.EMPTY, fromField); + case DOUBLE, FLOAT -> new ToDouble(Source.EMPTY, fromField); + case INTEGER -> new ToInteger(Source.EMPTY, fromField); + case LONG -> new ToLong(Source.EMPTY, fromField); + case IP -> new ToIP(Source.EMPTY, fromField); + case KEYWORD -> new ToString(Source.EMPTY, fromField); + case GEO_POINT -> new ToGeoPoint(Source.EMPTY, fromField); + case GEO_SHAPE -> new ToGeoShape(Source.EMPTY, fromField); + case CARTESIAN_POINT -> new ToCartesianPoint(Source.EMPTY, fromField); + case CARTESIAN_SHAPE -> new ToCartesianShape(Source.EMPTY, fromField); + case VERSION -> new ToVersion(Source.EMPTY, fromField); + default -> throw new UnsupportedOperationException("Conversion from " + fromType + " to " + toType + " is not supported"); + }; + } + } + + private static FieldAttribute fieldAttribute(String name, DataType dataType) { + return new FieldAttribute(Source.EMPTY, name, new EsField(name, dataType, Map.of(), true)); + } +} diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/160_union_types.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/160_union_types.yml new file mode 100644 index 0000000000000..f3403ca8751c0 --- /dev/null +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/160_union_types.yml @@ -0,0 +1,573 @@ +setup: + - requires: + capabilities: + - method: POST + path: /_query + parameters: [method, path, parameters, capabilities] + capabilities: [union_types] + reason: "Union types introduced in 8.15.0" + test_runner_features: [capabilities, allowed_warnings_regex] + + - do: + indices.create: + index: events_ip_long + body: + mappings: + properties: + "@timestamp": + type: date + client_ip: + type: ip + event_duration: + type: long + message: + type: keyword + + - do: + bulk: + refresh: true + index: events_ip_long + body: + - '{"index": {}}' + - '{"@timestamp": "2023-10-23T13:55:01.543Z", "client_ip": "172.21.3.15", "event_duration": 1756467, "message": "Connected to 10.1.0.1"}' + - '{"index": {}}' + - '{"@timestamp": "2023-10-23T13:53:55.832Z", "client_ip": "172.21.3.15", "event_duration": 5033755, "message": "Connection error"}' + - '{"index": {}}' + - '{"@timestamp": "2023-10-23T13:52:55.015Z", "client_ip": "172.21.3.15", "event_duration": 8268153, "message": "Connection error"}' + - '{"index": {}}' + - '{"@timestamp": "2023-10-23T13:51:54.732Z", "client_ip": "172.21.3.15", "event_duration": 725448, "message": "Connection error"}' + - '{"index": {}}' + - '{"@timestamp": "2023-10-23T13:33:34.937Z", "client_ip": "172.21.0.5", "event_duration": 1232382, "message": "Disconnected"}' + - '{"index": {}}' + - '{"@timestamp": "2023-10-23T12:27:28.948Z", "client_ip": "172.21.2.113", "event_duration": 2764889, "message": "Connected to 10.1.0.2"}' + - '{"index": {}}' + - '{"@timestamp": "2023-10-23T12:15:03.360Z", "client_ip": "172.21.2.162", 
"event_duration": 3450233, "message": "Connected to 10.1.0.3"}' + - do: + indices.create: + index: events_keyword_long + body: + mappings: + properties: + "@timestamp": + type: date + client_ip: + type: keyword + event_duration: + type: long + message: + type: keyword + + - do: + bulk: + refresh: true + index: events_keyword_long + body: + - '{"index": {}}' + - '{"@timestamp": "2023-10-23T13:55:01.543Z", "client_ip": "172.21.3.15", "event_duration": 1756467, "message": "Connected to 10.1.0.1"}' + - '{"index": {}}' + - '{"@timestamp": "2023-10-23T13:53:55.832Z", "client_ip": "172.21.3.15", "event_duration": 5033755, "message": "Connection error"}' + - '{"index": {}}' + - '{"@timestamp": "2023-10-23T13:52:55.015Z", "client_ip": "172.21.3.15", "event_duration": 8268153, "message": "Connection error"}' + - '{"index": {}}' + - '{"@timestamp": "2023-10-23T13:51:54.732Z", "client_ip": "172.21.3.15", "event_duration": 725448, "message": "Connection error"}' + - '{"index": {}}' + - '{"@timestamp": "2023-10-23T13:33:34.937Z", "client_ip": "172.21.0.5", "event_duration": 1232382, "message": "Disconnected"}' + - '{"index": {}}' + - '{"@timestamp": "2023-10-23T12:27:28.948Z", "client_ip": "172.21.2.113", "event_duration": 2764889, "message": "Connected to 10.1.0.2"}' + - '{"index": {}}' + - '{"@timestamp": "2023-10-23T12:15:03.360Z", "client_ip": "172.21.2.162", "event_duration": 3450233, "message": "Connected to 10.1.0.3"}' + + - do: + indices.create: + index: events_ip_keyword + body: + mappings: + properties: + "@timestamp": + type: date + client_ip: + type: ip + event_duration: + type: keyword + message: + type: keyword + + - do: + bulk: + refresh: true + index: events_ip_keyword + body: + - '{"index": {}}' + - '{"@timestamp": "2023-10-23T13:55:01.543Z", "client_ip": "172.21.3.15", "event_duration": "1756467", "message": "Connected to 10.1.0.1"}' + - '{"index": {}}' + - '{"@timestamp": "2023-10-23T13:53:55.832Z", "client_ip": "172.21.3.15", "event_duration": "5033755", "message": "Connection error"}' + - '{"index": {}}' + - '{"@timestamp": "2023-10-23T13:52:55.015Z", "client_ip": "172.21.3.15", "event_duration": "8268153", "message": "Connection error"}' + - '{"index": {}}' + - '{"@timestamp": "2023-10-23T13:51:54.732Z", "client_ip": "172.21.3.15", "event_duration": "725448", "message": "Connection error"}' + - '{"index": {}}' + - '{"@timestamp": "2023-10-23T13:33:34.937Z", "client_ip": "172.21.0.5", "event_duration": "1232382", "message": "Disconnected"}' + - '{"index": {}}' + - '{"@timestamp": "2023-10-23T12:27:28.948Z", "client_ip": "172.21.2.113", "event_duration": "2764889", "message": "Connected to 10.1.0.2"}' + - '{"index": {}}' + - '{"@timestamp": "2023-10-23T12:15:03.360Z", "client_ip": "172.21.2.162", "event_duration": "3450233", "message": "Connected to 10.1.0.3"}' + + - do: + indices.create: + index: events_keyword_keyword + body: + mappings: + properties: + "@timestamp": + type: date + client_ip: + type: keyword + event_duration: + type: keyword + message: + type: keyword + + - do: + bulk: + refresh: true + index: events_keyword_keyword + body: + - '{"index": {}}' + - '{"@timestamp": "2023-10-23T13:55:01.543Z", "client_ip": "172.21.3.15", "event_duration": "1756467", "message": "Connected to 10.1.0.1"}' + - '{"index": {}}' + - '{"@timestamp": "2023-10-23T13:53:55.832Z", "client_ip": "172.21.3.15", "event_duration": "5033755", "message": "Connection error"}' + - '{"index": {}}' + - '{"@timestamp": "2023-10-23T13:52:55.015Z", "client_ip": "172.21.3.15", "event_duration": "8268153", 
"message": "Connection error"}' + - '{"index": {}}' + - '{"@timestamp": "2023-10-23T13:51:54.732Z", "client_ip": "172.21.3.15", "event_duration": "725448", "message": "Connection error"}' + - '{"index": {}}' + - '{"@timestamp": "2023-10-23T13:33:34.937Z", "client_ip": "172.21.0.5", "event_duration": "1232382", "message": "Disconnected"}' + - '{"index": {}}' + - '{"@timestamp": "2023-10-23T12:27:28.948Z", "client_ip": "172.21.2.113", "event_duration": "2764889", "message": "Connected to 10.1.0.2"}' + - '{"index": {}}' + - '{"@timestamp": "2023-10-23T12:15:03.360Z", "client_ip": "172.21.2.162", "event_duration": "3450233", "message": "Connected to 10.1.0.3"}' + +--- +load single index ip_long: + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'FROM events_ip_long METADATA _index | KEEP _index, @timestamp, client_ip, event_duration, message | SORT _index ASC, @timestamp DESC' + + - match: { columns.0.name: "_index" } + - match: { columns.0.type: "keyword" } + - match: { columns.1.name: "@timestamp" } + - match: { columns.1.type: "date" } + - match: { columns.2.name: "client_ip" } + - match: { columns.2.type: "ip" } + - match: { columns.3.name: "event_duration" } + - match: { columns.3.type: "long" } + - match: { columns.4.name: "message" } + - match: { columns.4.type: "keyword" } + - length: { values: 7 } + - match: { values.0.0: "events_ip_long" } + - match: { values.0.1: "2023-10-23T13:55:01.543Z" } + - match: { values.0.2: "172.21.3.15" } + - match: { values.0.3: 1756467 } + - match: { values.0.4: "Connected to 10.1.0.1" } + +############################################################################################################ +# Test a single index as a control of the expected results + +--- +load single index keyword_keyword: + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'FROM events_keyword_keyword METADATA _index | KEEP _index, @timestamp, client_ip, event_duration, message | SORT _index ASC, @timestamp DESC' + + - match: { columns.0.name: "_index" } + - match: { columns.0.type: "keyword" } + - match: { columns.1.name: "@timestamp" } + - match: { columns.1.type: "date" } + - match: { columns.2.name: "client_ip" } + - match: { columns.2.type: "keyword" } + - match: { columns.3.name: "event_duration" } + - match: { columns.3.type: "keyword" } + - match: { columns.4.name: "message" } + - match: { columns.4.type: "keyword" } + - length: { values: 7 } + - match: { values.0.0: "events_keyword_keyword" } + - match: { values.0.1: "2023-10-23T13:55:01.543Z" } + - match: { values.0.2: "172.21.3.15" } + - match: { values.0.3: "1756467" } + - match: { values.0.4: "Connected to 10.1.0.1" } + +############################################################################################################ +# Test two indices where the event_duration is mapped as a LONG and as a KEYWORD + +--- +load two indices, showing unsupported type and null value for event_duration: + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'FROM events_ip_* METADATA _index | SORT _index ASC, @timestamp DESC' + + - length: { values: 14 } + + - match: { columns.0.name: "@timestamp" } + - match: { columns.0.type: "date" } + - match: { columns.1.name: "client_ip" } + - match: { columns.1.type: "ip" } + - match: { columns.2.name: "event_duration" } + - match: { columns.2.type: "unsupported" } + - match: { 
columns.3.name: "message" } + - match: { columns.3.type: "keyword" } + - match: { columns.4.name: "_index" } + - match: { columns.4.type: "keyword" } + - length: { values: 14 } + - match: { values.0.0: "2023-10-23T13:55:01.543Z" } + - match: { values.0.1: "172.21.3.15" } + - match: { values.0.2: null } + - match: { values.0.3: "Connected to 10.1.0.1" } + - match: { values.0.4: "events_ip_keyword" } + - match: { values.7.0: "2023-10-23T13:55:01.543Z" } + - match: { values.7.1: "172.21.3.15" } + - match: { values.7.2: null } + - match: { values.7.3: "Connected to 10.1.0.1" } + - match: { values.7.4: "events_ip_long" } + +--- +load two indices with no conversion function, but needs TO_LONG conversion: + - do: + catch: '/Cannot use field \[event_duration\] due to ambiguities being mapped as \[2\] incompatible types: \[keyword\] in \[events_ip_keyword\], \[long\] in \[events_ip_long\]/' + esql.query: + body: + query: 'FROM events_ip_* METADATA _index | KEEP _index, @timestamp, client_ip, event_duration, message | SORT _index ASC, @timestamp DESC' + +--- +load two indices with incorrect conversion function, TO_IP instead of TO_LONG: + - do: + catch: '/Cannot use field \[event_duration\] due to ambiguities being mapped as \[2\] incompatible types: \[keyword\] in \[events_ip_keyword\], \[long\] in \[events_ip_long\]/' + esql.query: + body: + query: 'FROM events_ip_* METADATA _index | EVAL event_duration = TO_IP(event_duration) | KEEP _index, @timestamp, client_ip, event_duration, message | SORT _index ASC, @timestamp DESC' + +--- +load two indices with single conversion function TO_LONG: + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'FROM events_ip_* METADATA _index | EVAL event_duration = TO_LONG(event_duration) | KEEP _index, @timestamp, client_ip, event_duration, message | SORT _index ASC, @timestamp DESC' + + - match: { columns.0.name: "_index" } + - match: { columns.0.type: "keyword" } + - match: { columns.1.name: "@timestamp" } + - match: { columns.1.type: "date" } + - match: { columns.2.name: "client_ip" } + - match: { columns.2.type: "ip" } + - match: { columns.3.name: "event_duration" } + - match: { columns.3.type: "long" } + - match: { columns.4.name: "message" } + - match: { columns.4.type: "keyword" } + - length: { values: 14 } + - match: { values.0.0: "events_ip_keyword" } + - match: { values.0.1: "2023-10-23T13:55:01.543Z" } + - match: { values.0.2: "172.21.3.15" } + - match: { values.0.3: 1756467 } + - match: { values.0.4: "Connected to 10.1.0.1" } + - match: { values.7.0: "events_ip_long" } + - match: { values.7.1: "2023-10-23T13:55:01.543Z" } + - match: { values.7.2: "172.21.3.15" } + - match: { values.7.3: 1756467 } + - match: { values.7.4: "Connected to 10.1.0.1" } + +--- +load two indices and drop ambiguous field event_duration: + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'FROM events_ip_* | DROP event_duration' + + - length: { values: 14 } + +--- +load two indices, convert and then drop ambiguous field event_duration: + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'FROM events_ip_* | EVAL event_duration = TO_LONG(event_duration) | DROP event_duration' + + - length: { values: 14 } + +--- +load two indices, convert, rename and then drop ambiguous field event_duration: + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of 
\\[.*\\]" + esql.query: + body: + query: 'FROM events_ip_* | EVAL x = TO_LONG(event_duration) | DROP event_duration' + + - length: { values: 14 } + +--- +# This test needs to change to produce unsupported/null for the original field name +load two indices, convert, rename but not drop ambiguous field event_duration: + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'FROM events_ip_* | EVAL x = TO_LONG(event_duration), y = TO_STRING(event_duration), z = TO_LONG(event_duration) | SORT @timestamp DESC' + + - match: { columns.0.name: "@timestamp" } + - match: { columns.0.type: "date" } + - match: { columns.1.name: "client_ip" } + - match: { columns.1.type: "ip" } + - match: { columns.2.name: "event_duration" } + - match: { columns.2.type: "unsupported" } + - match: { columns.3.name: "message" } + - match: { columns.3.type: "keyword" } + - match: { columns.4.name: "x" } + - match: { columns.4.type: "long" } + - match: { columns.5.name: "y" } + - match: { columns.5.type: "keyword" } + - match: { columns.6.name: "z" } + - match: { columns.6.type: "long" } + - length: { values: 14 } + - match: { values.0.0: "2023-10-23T13:55:01.543Z" } + - match: { values.0.1: "172.21.3.15" } + - match: { values.0.2: null } + - match: { values.0.3: "Connected to 10.1.0.1" } + - match: { values.0.4: 1756467 } + - match: { values.0.5: "1756467" } + - match: { values.0.6: 1756467 } + - match: { values.1.0: "2023-10-23T13:55:01.543Z" } + - match: { values.1.1: "172.21.3.15" } + - match: { values.1.2: null } + - match: { values.1.3: "Connected to 10.1.0.1" } + - match: { values.1.4: 1756467 } + - match: { values.1.5: "1756467" } + - match: { values.1.6: 1756467 } + +############################################################################################################ +# Test two indices where the IP address is mapped as an IP and as a KEYWORD + +--- +load two indices, showing unsupported type and null value for client_ip: + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'FROM events_*_long METADATA _index | SORT _index ASC, @timestamp DESC' + + - match: { columns.0.name: "@timestamp" } + - match: { columns.0.type: "date" } + - match: { columns.1.name: "client_ip" } + - match: { columns.1.type: "unsupported" } + - match: { columns.2.name: "event_duration" } + - match: { columns.2.type: "long" } + - match: { columns.3.name: "message" } + - match: { columns.3.type: "keyword" } + - match: { columns.4.name: "_index" } + - match: { columns.4.type: "keyword" } + - length: { values: 14 } + - match: { values.0.0: "2023-10-23T13:55:01.543Z" } + - match: { values.0.1: null } + - match: { values.0.2: 1756467 } + - match: { values.0.3: "Connected to 10.1.0.1" } + - match: { values.0.4: "events_ip_long" } + - match: { values.7.0: "2023-10-23T13:55:01.543Z" } + - match: { values.7.1: null } + - match: { values.7.2: 1756467 } + - match: { values.7.3: "Connected to 10.1.0.1" } + - match: { values.7.4: "events_keyword_long" } + +--- +load two indices with no conversion function, but needs TO_IP conversion: + - do: + catch: '/Cannot use field \[client_ip\] due to ambiguities being mapped as \[2\] incompatible types: \[ip\] in \[events_ip_long\], \[keyword\] in \[events_keyword_long\]/' + esql.query: + body: + query: 'FROM events_*_long METADATA _index | KEEP _index, @timestamp, client_ip, event_duration, message | SORT _index ASC, @timestamp DESC' + +--- +load two indices with incorrect 
conversion function, TO_LONG instead of TO_IP: + - do: + catch: '/Cannot use field \[client_ip\] due to ambiguities being mapped as \[2\] incompatible types: \[ip\] in \[events_ip_long\], \[keyword\] in \[events_keyword_long\]/' + esql.query: + body: + query: 'FROM events_*_long METADATA _index | EVAL client_ip = TO_LONG(client_ip) | KEEP _index, @timestamp, client_ip, event_duration, message | SORT _index ASC, @timestamp DESC' + +--- +load two indices with single conversion function TO_IP: + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'FROM events_*_long METADATA _index | EVAL client_ip = TO_IP(client_ip) | KEEP _index, @timestamp, client_ip, event_duration, message | SORT _index ASC, @timestamp DESC' + + - match: { columns.0.name: "_index" } + - match: { columns.0.type: "keyword" } + - match: { columns.1.name: "@timestamp" } + - match: { columns.1.type: "date" } + - match: { columns.2.name: "client_ip" } + - match: { columns.2.type: "ip" } + - match: { columns.3.name: "event_duration" } + - match: { columns.3.type: "long" } + - match: { columns.4.name: "message" } + - match: { columns.4.type: "keyword" } + - length: { values: 14 } + - match: { values.0.0: "events_ip_long" } + - match: { values.0.1: "2023-10-23T13:55:01.543Z" } + - match: { values.0.2: "172.21.3.15" } + - match: { values.0.3: 1756467 } + - match: { values.0.4: "Connected to 10.1.0.1" } + - match: { values.7.0: "events_keyword_long" } + - match: { values.7.1: "2023-10-23T13:55:01.543Z" } + - match: { values.7.2: "172.21.3.15" } + - match: { values.7.3: 1756467 } + - match: { values.7.4: "Connected to 10.1.0.1" } + +--- +load two indices and drop ambiguous field client_ip: + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'FROM events_*_long | DROP client_ip' + + - length: { values: 14 } + +--- +load two indices, convert and then drop ambiguous field client_ip: + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'FROM events_*_long | EVAL client_ip = TO_IP(client_ip) | DROP client_ip' + + - length: { values: 14 } + +--- +load two indices, convert, rename and then drop ambiguous field client_ip: + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'FROM events_*_long | EVAL x = TO_IP(client_ip) | DROP client_ip' + + - length: { values: 14 } + +--- +# This test needs to change to produce unsupported/null for the original field name +load two indices, convert, rename but not drop ambiguous field client_ip: + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'FROM events_*_long | EVAL x = TO_IP(client_ip), y = TO_STRING(client_ip), z = TO_IP(client_ip) | SORT @timestamp DESC' + + - match: { columns.0.name: "@timestamp" } + - match: { columns.0.type: "date" } + - match: { columns.1.name: "client_ip" } + - match: { columns.1.type: "unsupported" } + - match: { columns.2.name: "event_duration" } + - match: { columns.2.type: "long" } + - match: { columns.3.name: "message" } + - match: { columns.3.type: "keyword" } + - match: { columns.4.name: "x" } + - match: { columns.4.type: "ip" } + - match: { columns.5.name: "y" } + - match: { columns.5.type: "keyword" } + - match: { columns.6.name: "z" } + - match: { columns.6.type: "ip" } + - length: { values: 14 } + - match: { values.0.0: 
"2023-10-23T13:55:01.543Z" } + - match: { values.0.1: null } + - match: { values.0.2: 1756467 } + - match: { values.0.3: "Connected to 10.1.0.1" } + - match: { values.0.4: "172.21.3.15" } + - match: { values.0.5: "172.21.3.15" } + - match: { values.0.6: "172.21.3.15" } + - match: { values.1.0: "2023-10-23T13:55:01.543Z" } + - match: { values.1.1: null } + - match: { values.1.2: 1756467 } + - match: { values.1.3: "Connected to 10.1.0.1" } + - match: { values.1.4: "172.21.3.15" } + - match: { values.1.5: "172.21.3.15" } + - match: { values.1.6: "172.21.3.15" } + +############################################################################################################ +# Test four indices with both the client_IP (IP and KEYWORD) and event_duration (LONG and KEYWORD) mappings + +--- +load four indices with single conversion function TO_LONG: + - do: + catch: '/Cannot use field \[client_ip\] due to ambiguities being mapped as \[2\] incompatible types: \[ip\] in \[events_ip_keyword, events_ip_long\], \[keyword\] in \[events_keyword_keyword, events_keyword_long\]/' + esql.query: + body: + query: 'FROM events_* METADATA _index | EVAL event_duration = TO_LONG(event_duration) | KEEP _index, @timestamp, client_ip, event_duration, message | SORT _index ASC, @timestamp DESC' + +--- +load four indices with single conversion function TO_IP: + - do: + catch: '/Cannot use field \[event_duration\] due to ambiguities being mapped as \[2\] incompatible types: \[keyword\] in \[events_ip_keyword, events_keyword_keyword\], \[long\] in \[events_ip_long, events_keyword_long\]/' + esql.query: + body: + query: 'FROM events_* METADATA _index | EVAL client_ip = TO_IP(client_ip) | KEEP _index, @timestamp, client_ip, event_duration, message | SORT _index ASC, @timestamp DESC' + +--- +load four indices with multiple conversion functions TO_LONG and TO_IP: + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'FROM events_* METADATA _index | EVAL event_duration = TO_LONG(event_duration), client_ip = TO_IP(client_ip) | KEEP _index, @timestamp, client_ip, event_duration, message | SORT _index ASC, @timestamp DESC' + + - match: { columns.0.name: "_index" } + - match: { columns.0.type: "keyword" } + - match: { columns.1.name: "@timestamp" } + - match: { columns.1.type: "date" } + - match: { columns.2.name: "client_ip" } + - match: { columns.2.type: "ip" } + - match: { columns.3.name: "event_duration" } + - match: { columns.3.type: "long" } + - match: { columns.4.name: "message" } + - match: { columns.4.type: "keyword" } + - length: { values: 28 } + - match: { values.0.0: "events_ip_keyword" } + - match: { values.0.1: "2023-10-23T13:55:01.543Z" } + - match: { values.0.2: "172.21.3.15" } + - match: { values.0.3: 1756467 } + - match: { values.0.4: "Connected to 10.1.0.1" } + - match: { values.7.0: "events_ip_long" } + - match: { values.7.1: "2023-10-23T13:55:01.543Z" } + - match: { values.7.2: "172.21.3.15" } + - match: { values.7.3: 1756467 } + - match: { values.7.4: "Connected to 10.1.0.1" } + - match: { values.14.0: "events_keyword_keyword" } + - match: { values.14.1: "2023-10-23T13:55:01.543Z" } + - match: { values.14.2: "172.21.3.15" } + - match: { values.14.3: 1756467 } + - match: { values.14.4: "Connected to 10.1.0.1" } + - match: { values.21.0: "events_keyword_long" } + - match: { values.21.1: "2023-10-23T13:55:01.543Z" } + - match: { values.21.2: "172.21.3.15" } + - match: { values.21.3: 1756467 } + - match: { values.21.4: "Connected to 10.1.0.1" } 
diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/161_union_types_subfields.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/161_union_types_subfields.yml new file mode 100644 index 0000000000000..99bd1d6508895 --- /dev/null +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/161_union_types_subfields.yml @@ -0,0 +1,203 @@ +setup: + - requires: + capabilities: + - method: POST + path: /_query + parameters: [ method, path, parameters, capabilities ] + capabilities: [ union_types ] + reason: "Union types introduced in 8.15.0" + test_runner_features: [ capabilities, allowed_warnings_regex ] + + - do: + indices.create: + index: test1 + body: + mappings: + properties: + obj: + properties: + keyword: + type: keyword + integer: + type: integer + keyword: + type: boolean + integer: + type: version + + - do: + indices.create: + index: test2 + body: + mappings: + properties: + obj: + properties: + keyword: + type: boolean + integer: + type: version + keyword: + type: keyword + integer: + type: integer + + - do: + bulk: + refresh: true + index: test1 + body: + - '{ "index": {"_id": 11} }' + - '{ "obj.keyword": "true", "obj.integer": 100, "keyword": "true", "integer": "50" }' + - '{ "index": {"_id": 12} }' + - '{ "obj.keyword": "US", "obj.integer": 20, "keyword": false, "integer": "1.2.3" }' + + - do: + bulk: + refresh: true + index: test2 + body: + - '{ "index": {"_id": 21} }' + - '{ "obj.keyword": "true", "obj.integer": "50", "keyword": "true", "integer": 100 }' + - '{ "index": {"_id": 22} }' + - '{ "obj.keyword": false, "obj.integer": "1.2.3", "keyword": "US", "integer": 20 }' + +--- +"load single index": + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'FROM test1 METADATA _id | KEEP _id, obj.integer, obj.keyword | SORT _id ASC' + + - match: { columns.0.name: "_id" } + - match: { columns.0.type: "keyword" } + - match: { columns.1.name: "obj.integer" } + - match: { columns.1.type: "integer" } + - match: { columns.2.name: "obj.keyword" } + - match: { columns.2.type: "keyword" } + - length: { values: 2 } + - match: { values.0.0: "11" } + - match: { values.0.1: 100 } + - match: { values.0.2: "true" } + - match: { values.1.0: "12" } + - match: { values.1.1: 20 } + - match: { values.1.2: "US" } + +--- +"load two indices with to_string": + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'FROM test* METADATA _id | EVAL s = TO_STRING(obj.keyword) | KEEP _id, s | SORT _id ASC' + + - match: { columns.0.name: "_id" } + - match: { columns.0.type: "keyword" } + - match: { columns.1.name: "s" } + - match: { columns.1.type: "keyword" } + - length: { values: 4 } + - match: { values.0.0: "11" } + - match: { values.0.1: "true" } + - match: { values.1.0: "12" } + - match: { values.1.1: "US" } + - match: { values.2.0: "21" } + - match: { values.2.1: "true" } + - match: { values.3.0: "22" } + - match: { values.3.1: "false" } + + +--- +"load two indices with to_version": + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'FROM test* METADATA _id | EVAL v = TO_VERSION(TO_STRING(obj.integer)) | KEEP _id, v | SORT _id ASC' + + - match: { columns.0.name: "_id" } + - match: { columns.0.type: "keyword" } + - match: { columns.1.name: "v" } + - match: { columns.1.type: "version" } + - length: { values: 4 } + - match: { values.0.0: "11" } + - match: 
{ values.0.1: "100" } + - match: { values.1.0: "12" } + - match: { values.1.1: "20" } + - match: { values.2.0: "21" } + - match: { values.2.1: "50" } + - match: { values.3.0: "22" } + - match: { values.3.1: "1.2.3" } + +--- +"load two indices with to_version and to_string": + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'FROM test* METADATA _id | EVAL v = TO_VERSION(TO_STRING(obj.integer)), s = TO_STRING(obj.keyword) | KEEP _id, v, s | SORT _id ASC' + + - match: { columns.0.name: "_id" } + - match: { columns.0.type: "keyword" } + - match: { columns.1.name: "v" } + - match: { columns.1.type: "version" } + - match: { columns.2.name: "s" } + - match: { columns.2.type: "keyword" } + - length: { values: 4 } + - match: { values.0.0: "11" } + - match: { values.0.1: "100" } + - match: { values.0.2: "true" } + - match: { values.1.0: "12" } + - match: { values.1.1: "20" } + - match: { values.1.2: "US" } + - match: { values.2.0: "21" } + - match: { values.2.1: "50" } + - match: { values.2.2: "true" } + - match: { values.3.0: "22" } + - match: { values.3.1: "1.2.3" } + - match: { values.3.2: "false" } + +--- +"load two indices with to_version and to_string nested and un-nested": + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'FROM test* METADATA _id | EVAL nv = TO_VERSION(TO_STRING(obj.integer)), uv = TO_VERSION(TO_STRING(integer)), ns = TO_STRING(obj.keyword), us = TO_STRING(keyword) | KEEP _id, nv, uv, ns, us | SORT _id ASC' + + - match: { columns.0.name: "_id" } + - match: { columns.0.type: "keyword" } + - match: { columns.1.name: "nv" } + - match: { columns.1.type: "version" } + - match: { columns.2.name: "uv" } + - match: { columns.2.type: "version" } + - match: { columns.3.name: "ns" } + - match: { columns.3.type: "keyword" } + - match: { columns.4.name: "us" } + - match: { columns.4.type: "keyword" } + - length: { values: 4 } + - match: { values.0.0: "11" } + - match: { values.0.1: "100" } + - match: { values.0.2: "50" } + - match: { values.0.3: "true" } + - match: { values.0.4: "true" } + - match: { values.1.0: "12" } + - match: { values.1.1: "20" } + - match: { values.1.2: "1.2.3" } + - match: { values.1.3: "US" } + - match: { values.1.4: "false" } + - match: { values.2.0: "21" } + - match: { values.2.1: "50" } + - match: { values.2.2: "100" } + - match: { values.2.3: "true" } + - match: { values.2.4: "true" } + - match: { values.3.0: "22" } + - match: { values.3.1: "1.2.3" } + - match: { values.3.2: "20" } + - match: { values.3.3: "false" } + - match: { values.3.4: "US" } From 56a19f96365d4ea0711f1e33057e4cb00f4e8ccd Mon Sep 17 00:00:00 2001 From: David Turner Date: Wed, 19 Jun 2024 17:03:49 +0100 Subject: [PATCH 38/44] Fix `TasksIT#testTasksCancellation` (#109929) The tasks are removed from the task manager _after_ sending the response, so we cannot reliably assert they're done. With this commit we wait for them to complete properly first. 
Closes #109686 --- .../action/admin/cluster/node/tasks/TasksIT.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java index 30be4c86eaeef..4ad2a56d2e979 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java @@ -488,7 +488,6 @@ public void onTaskRegistered(Task task) { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/109686") public void testTasksCancellation() throws Exception { // Start blocking test task // Get real client (the plugin is not registered on transport nodes) @@ -511,6 +510,9 @@ public void testTasksCancellation() throws Exception { expectThrows(TaskCancelledException.class, future); + logger.info("--> waiting for all ongoing tasks to complete within a reasonable time"); + safeGet(clusterAdmin().prepareListTasks().setActions(TEST_TASK_ACTION.name() + "*").setWaitForCompletion(true).execute()); + logger.info("--> checking that test tasks are not running"); assertEquals(0, clusterAdmin().prepareListTasks().setActions(TEST_TASK_ACTION.name() + "*").get().getTasks().size()); } From c6e21a9fd372c0236b4a4375d2074c974fc51cae Mon Sep 17 00:00:00 2001 From: Liam Thompson <32779855+leemthompo@users.noreply.github.com> Date: Wed, 19 Jun 2024 18:19:44 +0200 Subject: [PATCH 39/44] Fix Bulk Helpers link of Python (#108694) (#109939) Co-authored-by: Hasanul Islam --- docs/reference/docs/bulk.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/docs/bulk.asciidoc b/docs/reference/docs/bulk.asciidoc index 02f7d7e941fe8..69bf3d1b7db5a 100644 --- a/docs/reference/docs/bulk.asciidoc +++ b/docs/reference/docs/bulk.asciidoc @@ -140,7 +140,7 @@ Perl:: Python:: - See https://elasticsearch-py.readthedocs.org/en/master/helpers.html[elasticsearch.helpers.*] + See https://elasticsearch-py.readthedocs.io/en/latest/helpers.html[elasticsearch.helpers.*] JavaScript:: From ab55833534caa840e82954356d366d21fca27e6e Mon Sep 17 00:00:00 2001 From: Chris Hegarty <62058229+ChrisHegarty@users.noreply.github.com> Date: Wed, 19 Jun 2024 17:38:31 +0100 Subject: [PATCH 40/44] Fix for TopNOperatorTests (#109937) --- muted-tests.yml | 3 --- .../java/org/elasticsearch/compute/data/BlockTestUtils.java | 3 ++- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index aef4b526e8b52..7ec3087b83b25 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -75,9 +75,6 @@ tests: method: "testFetchAllEntities" - class: "org.elasticsearch.xpack.ml.integration.AutodetectMemoryLimitIT" issue: "https://github.com/elastic/elasticsearch/issues/109904" -- class: "org.elasticsearch.compute.operator.topn.TopNOperatorTests" - issue: "https://github.com/elastic/elasticsearch/issues/109915" - method: "testRandomMultiValuesTopN" # Examples: diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockTestUtils.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockTestUtils.java index b99594eb04e08..55e80a9124de0 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockTestUtils.java +++ 
b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockTestUtils.java @@ -17,6 +17,7 @@ import static org.elasticsearch.test.ESTestCase.between; import static org.elasticsearch.test.ESTestCase.randomBoolean; import static org.elasticsearch.test.ESTestCase.randomDouble; +import static org.elasticsearch.test.ESTestCase.randomFloat; import static org.elasticsearch.test.ESTestCase.randomInt; import static org.elasticsearch.test.ESTestCase.randomLong; import static org.elasticsearch.test.ESTestCase.randomRealisticUnicodeOfCodepointLengthBetween; @@ -31,7 +32,7 @@ public static Object randomValue(ElementType e) { return switch (e) { case INT -> randomInt(); case LONG -> randomLong(); - case FLOAT -> Float.intBitsToFloat(randomInt()); + case FLOAT -> randomFloat(); case DOUBLE -> randomDouble(); case BYTES_REF -> new BytesRef(randomRealisticUnicodeOfCodepointLengthBetween(0, 5)); // TODO: also test spatial WKB case BOOLEAN -> randomBoolean(); From 7d135a2c22e5a217e51b912670f17b449fcc50dc Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Wed, 19 Jun 2024 18:50:44 +0200 Subject: [PATCH 41/44] Don't synchronize MapperService.doMerge for preflight checks (#109934) We don't need to synchronize if we are not actually updating the mapping field. This is needlessly slowing down dynamic mapping updates and blocking the write pool. --- .../index/mapper/MapperService.java | 21 ++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java index 3ac4c0b0e18e1..277b9c4b66b33 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -560,18 +560,25 @@ public DocumentMapper merge(String type, CompressedXContent mappingSource, Merge return doMerge(type, reason, mappingSourceAsMap); } - private synchronized DocumentMapper doMerge(String type, MergeReason reason, Map mappingSourceAsMap) { + private DocumentMapper doMerge(String type, MergeReason reason, Map mappingSourceAsMap) { Mapping incomingMapping = parseMapping(type, reason, mappingSourceAsMap); - Mapping mapping = mergeMappings(this.mapper, incomingMapping, reason, this.indexSettings); // TODO: In many cases the source here is equal to mappingSource so we need not serialize again. // We should identify these cases reliably and save expensive serialization here - DocumentMapper newMapper = newDocumentMapper(mapping, reason, mapping.toCompressedXContent()); if (reason == MergeReason.MAPPING_AUTO_UPDATE_PREFLIGHT) { - return newMapper; + // only doing a merge without updating the actual #mapper field, no need to synchronize + Mapping mapping = mergeMappings(this.mapper, incomingMapping, MergeReason.MAPPING_AUTO_UPDATE_PREFLIGHT, this.indexSettings); + return newDocumentMapper(mapping, MergeReason.MAPPING_AUTO_UPDATE_PREFLIGHT, mapping.toCompressedXContent()); + } else { + // synchronized concurrent mapper updates are guaranteed to set merged mappers derived from the mapper value previously read + // TODO: can we even have concurrent updates here? 
+ synchronized (this) { + Mapping mapping = mergeMappings(this.mapper, incomingMapping, reason, this.indexSettings); + DocumentMapper newMapper = newDocumentMapper(mapping, reason, mapping.toCompressedXContent()); + this.mapper = newMapper; + assert assertSerialization(newMapper, reason); + return newMapper; + } } - this.mapper = newMapper; - assert assertSerialization(newMapper, reason); - return newMapper; } private DocumentMapper newDocumentMapper(Mapping mapping, MergeReason reason, CompressedXContent mappingSource) { From 832c93f3bec66c07749854ccd13f326a5aab9749 Mon Sep 17 00:00:00 2001 From: David Turner Date: Wed, 19 Jun 2024 18:29:54 +0100 Subject: [PATCH 42/44] AwaitsFix: https://github.com/elastic/elasticsearch/issues/109944 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 7ec3087b83b25..9b58a9446b3ca 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -75,6 +75,9 @@ tests: method: "testFetchAllEntities" - class: "org.elasticsearch.xpack.ml.integration.AutodetectMemoryLimitIT" issue: "https://github.com/elastic/elasticsearch/issues/109904" +- class: "org.elasticsearch.xpack.esql.action.AsyncEsqlQueryActionIT" + issue: "https://github.com/elastic/elasticsearch/issues/109944" + method: "testBasicAsyncExecution" # Examples: From b9e7965184efb6da6b06de56821db13911803c3a Mon Sep 17 00:00:00 2001 From: Patrick Doyle <810052+prdoyle@users.noreply.github.com> Date: Wed, 19 Jun 2024 15:23:47 -0400 Subject: [PATCH 43/44] Move PluginsService to its own internal package (#109872) * Mechanical package change in IntelliJ * A couple of manual fixups * Export plugins.loading to deprecation * Put plugin-cli in a module so can export PluginsUtils to it. --- .../script/ScriptScoreBenchmark.java | 2 +- .../resources/checkstyle_suppressions.xml | 2 +- .../plugin-cli/src/main/java/module-info.java | 25 ++++++++++++++++++ .../plugins/cli/InstallPluginAction.java | 2 +- .../plugins/cli/RemovePluginAction.java | 2 +- .../plugins/cli/InstallPluginActionTests.java | 2 +- .../plugins/cli/ListPluginsCommandTests.java | 2 +- .../plugins/cli/RemovePluginActionTests.java | 2 +- .../plugins/cli/SyncPluginsActionTests.java | 2 +- .../datastreams/DataStreamAutoshardingIT.java | 2 +- .../AbstractFeatureMigrationIntegTest.java | 2 +- .../s3/S3BlobStoreRepositoryMetricsTests.java | 2 +- .../s3/S3BlobStoreRepositoryTests.java | 2 +- .../s3/S3RepositoryThirdPartyTests.java | 2 +- .../s3/RepositoryCredentialsTests.java | 2 +- .../bootstrap/SpawnerNoBootstrapTests.java | 2 +- .../http/HealthRestCancellationIT.java | 2 +- .../http/SearchRestCancellationIT.java | 2 +- .../action/IndicesRequestIT.java | 2 +- .../action/admin/ReloadSecureSettingsIT.java | 2 +- .../admin/cluster/tasks/ListTasksIT.java | 2 +- ...tReplicationActionRetryOnClosedNodeIT.java | 2 +- .../coordination/RestHandlerNodesIT.java | 2 +- .../cluster/routing/ShardRoutingRoleIT.java | 2 +- .../indices/recovery/IndexRecoveryIT.java | 2 +- .../indices/recovery/TaskRecoveryIT.java | 2 +- .../metrics/NodeIndexingMetricsIT.java | 2 +- .../IndexFoldersDeletionListenerIT.java | 1 + .../threadpool/SimpleThreadPoolIT.java | 2 +- server/src/main/java/module-info.java | 1 + ...nsportNodesReloadSecureSettingsAction.java | 2 +- .../org/elasticsearch/bootstrap/Security.java | 2 +- .../org/elasticsearch/bootstrap/Spawner.java | 2 +- .../metadata/DataStreamFactoryRetention.java | 2 +- .../common/settings/ClusterSettings.java | 2 +- .../elasticsearch/indices/IndicesService.java | 2 +- 
.../indices/IndicesServiceBuilder.java | 2 +- .../java/org/elasticsearch/node/Node.java | 2 +- .../elasticsearch/node/NodeConstruction.java | 2 +- .../org/elasticsearch/node/NodeService.java | 2 +- .../node/NodeServiceProvider.java | 2 +- .../plugins/internal/ReloadAwarePlugin.java | 3 ++- .../plugins/{ => loading}/ModuleSupport.java | 2 +- .../plugins/{ => loading}/PluginBundle.java | 4 ++- .../{ => loading}/PluginIntrospector.java | 23 +++++++++++++++- .../PluginLoaderIndirection.java | 2 +- .../plugins/{ => loading}/PluginsService.java | 7 ++++- .../plugins/{ => loading}/PluginsUtils.java | 3 ++- .../StablePluginPlaceHolder.java | 4 ++- .../{ => loading}/UberModuleClassLoader.java | 2 +- .../scanners/NamedComponentReader.java | 2 +- .../scanners/StablePluginsRegistry.java | 2 +- ...HierarchyCircuitBreakerTelemetryTests.java | 2 +- .../org/elasticsearch/node/NodeTests.java | 2 +- .../plugins/PluginDescriptorTests.java | 2 ++ .../internal/ReloadAwarePluginTests.java | 2 +- .../PluginIntrospectorTests.java | 23 +++++++++++++++- .../{ => loading}/PluginsServiceTests.java | 15 +++++++---- .../{ => loading}/PluginsUtilsTests.java | 3 ++- .../UberModuleClassLoaderTests.java | 2 +- .../scanners/StablePluginsRegistryTests.java | 2 +- .../SearchTookTimeTelemetryTests.java | 2 +- .../SearchTransportTelemetryTests.java | 2 +- .../snapshots/SnapshotResiliencyTests.java | 2 +- ...ng.PluginsServiceTests$TestExtensionPoint} | 4 +-- .../plugins/{ => loading}/dummy-plugin.jar | Bin .../{ => loading}/non-extensible-plugin.jar | Bin .../support/CancellableActionTestPlugin.java | 2 +- .../bootstrap/BootstrapForTesting.java | 2 +- .../java/org/elasticsearch/node/MockNode.java | 4 +-- .../{ => loading}/MockPluginsService.java | 5 +++- .../plugins/{ => loading}/PluginTestUtil.java | 3 ++- .../test/AbstractBuilderTestCase.java | 4 +-- .../AbstractSearchCancellationTestCase.java | 2 +- .../plugins/MockPluginsServiceTests.java | 1 + ...AutoscalingCapacityRestCancellationIT.java | 2 +- .../TransportNodeDeprecationCheckAction.java | 2 +- ...nsportNodeDeprecationCheckActionTests.java | 2 +- .../DownsampleActionSingleNodeTests.java | 2 +- .../AbstractEqlBlockingIntegTestCase.java | 2 +- .../exporter/http/HttpExporterIT.java | 2 +- .../profiling/action/CancellationIT.java | 2 +- ...archableSnapshotsPrewarmingIntegTests.java | 2 +- .../authc/jwt/JwtRealmSingleNodeTests.java | 2 +- .../ProfileCancellationIntegTests.java | 2 +- .../AbstractSqlBlockingIntegTestCase.java | 2 +- .../WriteLoadForecasterIT.java | 2 +- 87 files changed, 180 insertions(+), 87 deletions(-) create mode 100644 distribution/tools/plugin-cli/src/main/java/module-info.java rename server/src/main/java/org/elasticsearch/plugins/{ => loading}/ModuleSupport.java (99%) rename server/src/main/java/org/elasticsearch/plugins/{ => loading}/PluginBundle.java (96%) rename server/src/main/java/org/elasticsearch/plugins/{ => loading}/PluginIntrospector.java (87%) rename server/src/main/java/org/elasticsearch/plugins/{ => loading}/PluginLoaderIndirection.java (95%) rename server/src/main/java/org/elasticsearch/plugins/{ => loading}/PluginsService.java (99%) rename server/src/main/java/org/elasticsearch/plugins/{ => loading}/PluginsUtils.java (99%) rename server/src/main/java/org/elasticsearch/plugins/{ => loading}/StablePluginPlaceHolder.java (85%) rename server/src/main/java/org/elasticsearch/plugins/{ => loading}/UberModuleClassLoader.java (99%) rename server/src/test/java/org/elasticsearch/plugins/{ => loading}/PluginIntrospectorTests.java (93%) rename 
server/src/test/java/org/elasticsearch/plugins/{ => loading}/PluginsServiceTests.java (98%) rename server/src/test/java/org/elasticsearch/plugins/{ => loading}/PluginsUtilsTests.java (99%) rename server/src/test/java/org/elasticsearch/plugins/{ => loading}/UberModuleClassLoaderTests.java (99%) rename server/src/test/resources/META-INF/services/{org.elasticsearch.plugins.PluginsServiceTests$TestExtensionPoint => org.elasticsearch.plugins.loading.PluginsServiceTests$TestExtensionPoint} (71%) rename server/src/test/resources/org/elasticsearch/plugins/{ => loading}/dummy-plugin.jar (100%) rename server/src/test/resources/org/elasticsearch/plugins/{ => loading}/non-extensible-plugin.jar (100%) rename test/framework/src/main/java/org/elasticsearch/plugins/{ => loading}/MockPluginsService.java (97%) rename test/framework/src/main/java/org/elasticsearch/plugins/{ => loading}/PluginTestUtil.java (96%) diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/script/ScriptScoreBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/script/ScriptScoreBenchmark.java index 5a27abe8be2a4..fb502302be8f0 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/script/ScriptScoreBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/script/ScriptScoreBenchmark.java @@ -33,8 +33,8 @@ import org.elasticsearch.index.mapper.NumberFieldMapper.NumberType; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; -import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.plugins.ScriptPlugin; +import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.script.DocReader; import org.elasticsearch.script.DocValuesDocReader; import org.elasticsearch.script.ScoreScript; diff --git a/build-tools-internal/src/main/resources/checkstyle_suppressions.xml b/build-tools-internal/src/main/resources/checkstyle_suppressions.xml index fd01993951959..de5a8aea34980 100644 --- a/build-tools-internal/src/main/resources/checkstyle_suppressions.xml +++ b/build-tools-internal/src/main/resources/checkstyle_suppressions.xml @@ -20,7 +20,7 @@ - + diff --git a/distribution/tools/plugin-cli/src/main/java/module-info.java b/distribution/tools/plugin-cli/src/main/java/module-info.java new file mode 100644 index 0000000000000..4898125790e37 --- /dev/null +++ b/distribution/tools/plugin-cli/src/main/java/module-info.java @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +import org.elasticsearch.cli.CliToolProvider; + +module org.elasticsearch.plugins.cli { + requires jopt.simple; + requires org.apache.lucene.core; + requires org.apache.lucene.suggest; + requires org.bouncycastle.fips.core; + requires org.bouncycastle.pg; + requires org.elasticsearch.base; + requires org.elasticsearch.cli; + requires org.elasticsearch.plugin.scanner; + requires org.elasticsearch.server; + requires org.elasticsearch.xcontent; + requires org.objectweb.asm; + + provides CliToolProvider with org.elasticsearch.plugins.cli.PluginCliProvider, org.elasticsearch.plugins.cli.SyncPluginsCliProvider; +} diff --git a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/InstallPluginAction.java b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/InstallPluginAction.java index c7bee4a6c172d..9bfe000feb81e 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/InstallPluginAction.java +++ b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/InstallPluginAction.java @@ -41,7 +41,7 @@ import org.elasticsearch.plugin.scanner.NamedComponentScanner; import org.elasticsearch.plugins.Platforms; import org.elasticsearch.plugins.PluginDescriptor; -import org.elasticsearch.plugins.PluginsUtils; +import org.elasticsearch.plugins.loading.PluginsUtils; import org.objectweb.asm.ClassReader; import java.io.BufferedReader; diff --git a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/RemovePluginAction.java b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/RemovePluginAction.java index 4714ef202b258..6d19641fb372a 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/RemovePluginAction.java +++ b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/RemovePluginAction.java @@ -13,7 +13,7 @@ import org.elasticsearch.cli.UserException; import org.elasticsearch.core.IOUtils; import org.elasticsearch.env.Environment; -import org.elasticsearch.plugins.PluginsUtils; +import org.elasticsearch.plugins.loading.PluginsUtils; import java.io.IOException; import java.nio.file.FileAlreadyExistsException; diff --git a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/InstallPluginActionTests.java b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/InstallPluginActionTests.java index 3dc7af07d4d83..fe18507398e1c 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/InstallPluginActionTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/InstallPluginActionTests.java @@ -50,7 +50,7 @@ import org.elasticsearch.plugin.scanner.NamedComponentScanner; import org.elasticsearch.plugins.Platforms; import org.elasticsearch.plugins.PluginDescriptor; -import org.elasticsearch.plugins.PluginTestUtil; +import org.elasticsearch.plugins.loading.PluginTestUtil; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.PosixPermissionsResetter; import org.elasticsearch.test.compiler.InMemoryJavaCompiler; diff --git a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/ListPluginsCommandTests.java b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/ListPluginsCommandTests.java index b225bc441794a..cec6b2c7881b1 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/ListPluginsCommandTests.java +++ 
b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/ListPluginsCommandTests.java @@ -18,7 +18,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; -import org.elasticsearch.plugins.PluginTestUtil; +import org.elasticsearch.plugins.loading.PluginTestUtil; import org.junit.Before; import java.io.IOException; diff --git a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/RemovePluginActionTests.java b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/RemovePluginActionTests.java index 73e89fc948029..9d9300155e9cb 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/RemovePluginActionTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/RemovePluginActionTests.java @@ -17,7 +17,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; -import org.elasticsearch.plugins.PluginTestUtil; +import org.elasticsearch.plugins.loading.PluginTestUtil; import org.elasticsearch.test.ESTestCase; import org.junit.Before; diff --git a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/SyncPluginsActionTests.java b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/SyncPluginsActionTests.java index 9802b4039bb7b..1e9002a7db392 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/SyncPluginsActionTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/SyncPluginsActionTests.java @@ -14,8 +14,8 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; -import org.elasticsearch.plugins.PluginTestUtil; import org.elasticsearch.plugins.cli.SyncPluginsAction.PluginChanges; +import org.elasticsearch.plugins.loading.PluginTestUtil; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; import org.hamcrest.Matchers; diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamAutoshardingIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamAutoshardingIT.java index a4c9a9d3e1c67..7bc0192b723ce 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamAutoshardingIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamAutoshardingIT.java @@ -51,7 +51,7 @@ import org.elasticsearch.index.shard.ShardPath; import org.elasticsearch.index.store.StoreStats; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.telemetry.InstrumentType; import org.elasticsearch.telemetry.Measurement; diff --git a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/AbstractFeatureMigrationIntegTest.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/AbstractFeatureMigrationIntegTest.java index 3bbc8e4b969ee..f7e07a82e00fb 100644 --- a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/AbstractFeatureMigrationIntegTest.java +++ 
b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/AbstractFeatureMigrationIntegTest.java @@ -28,8 +28,8 @@ import org.elasticsearch.indices.AssociatedIndexDescriptor; import org.elasticsearch.indices.SystemIndexDescriptor; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.plugins.SystemIndexPlugin; +import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.index.IndexVersionUtils; import org.elasticsearch.xcontent.XContentBuilder; diff --git a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryMetricsTests.java b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryMetricsTests.java index f8503bca3ec67..d88cf885ef921 100644 --- a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryMetricsTests.java +++ b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryMetricsTests.java @@ -20,7 +20,7 @@ import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.SuppressForbidden; -import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; import org.elasticsearch.repositories.s3.S3BlobStore.Operation; diff --git a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java index 88f0e01db3e6a..3b9eff683dbbf 100644 --- a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -38,7 +38,7 @@ import org.elasticsearch.index.IndexVersion; import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.RepositoryData; diff --git a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java index 5064910723ab6..8ff9fb478c402 100644 --- a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java +++ b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java @@ -30,7 +30,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.repositories.AbstractThirdPartyRepositoryTestCase; import org.elasticsearch.repositories.RepositoriesService; 
import org.elasticsearch.repositories.blobstore.RequestedRangeNotSatisfiedException; diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java index 9a1d12fab0af5..bd7827be1cca4 100644 --- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java @@ -21,7 +21,7 @@ import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.rest.AbstractRestChannel; import org.elasticsearch.rest.RestRequest; diff --git a/qa/no-bootstrap-tests/src/test/java/org/elasticsearch/bootstrap/SpawnerNoBootstrapTests.java b/qa/no-bootstrap-tests/src/test/java/org/elasticsearch/bootstrap/SpawnerNoBootstrapTests.java index 6369e02e1f605..96de064424312 100644 --- a/qa/no-bootstrap-tests/src/test/java/org/elasticsearch/bootstrap/SpawnerNoBootstrapTests.java +++ b/qa/no-bootstrap-tests/src/test/java/org/elasticsearch/bootstrap/SpawnerNoBootstrapTests.java @@ -22,7 +22,7 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.plugins.Platforms; -import org.elasticsearch.plugins.PluginTestUtil; +import org.elasticsearch.plugins.loading.PluginTestUtil; import org.elasticsearch.test.GraalVMThreadsFilter; import org.elasticsearch.test.MockLog; diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/HealthRestCancellationIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/HealthRestCancellationIT.java index dc298fc616506..6da08b71a6d58 100644 --- a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/HealthRestCancellationIT.java +++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/HealthRestCancellationIT.java @@ -24,7 +24,7 @@ import org.elasticsearch.health.node.HealthInfo; import org.elasticsearch.plugins.HealthPlugin; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.loading.PluginsService; import java.util.ArrayList; import java.util.Collection; diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/SearchRestCancellationIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/SearchRestCancellationIT.java index 73dd1525f8a08..462be7eb85283 100644 --- a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/SearchRestCancellationIT.java +++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/SearchRestCancellationIT.java @@ -27,7 +27,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.script.MockScriptPlugin; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java 
index 920677e8c4b4a..7969e401d1677 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java @@ -73,7 +73,7 @@ import org.elasticsearch.indices.TestIndexNameExpressionResolver; import org.elasticsearch.plugins.NetworkPlugin; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.script.MockScriptPlugin; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/ReloadSecureSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/ReloadSecureSettingsIT.java index 5d4a922ec3e11..9eb603ad894d3 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/ReloadSecureSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/ReloadSecureSettingsIT.java @@ -23,8 +23,8 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.plugins.ReloadablePlugin; +import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.test.ESIntegTestCase; import org.junit.BeforeClass; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/tasks/ListTasksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/tasks/ListTasksIT.java index 4a076cb3b6e66..0b0344187c8c6 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/tasks/ListTasksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/tasks/ListTasksIT.java @@ -21,7 +21,7 @@ import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.threadpool.ThreadPool; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/support/replication/TransportReplicationActionRetryOnClosedNodeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/support/replication/TransportReplicationActionRetryOnClosedNodeIT.java index b89cea7dff089..29abeb8badf90 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/support/replication/TransportReplicationActionRetryOnClosedNodeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/support/replication/TransportReplicationActionRetryOnClosedNodeIT.java @@ -27,7 +27,7 @@ import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.NetworkPlugin; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.ThreadPool; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RestHandlerNodesIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RestHandlerNodesIT.java index 5a7f4609a7d0f..08ae42c57fff1 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RestHandlerNodesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RestHandlerNodesIT.java @@ -18,7 +18,7 @@ import org.elasticsearch.features.NodeFeature; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestHandler; import org.elasticsearch.test.ESIntegTestCase; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java index bb9324dd7d10c..8aacf625e82cf 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java @@ -48,7 +48,7 @@ import org.elasticsearch.plugins.ClusterPlugin; import org.elasticsearch.plugins.EnginePlugin; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.search.builder.PointInTimeBuilder; import org.elasticsearch.snapshots.SnapshotState; import org.elasticsearch.test.ESIntegTestCase; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index 4f15b82ca1f16..ea7df1cacf3e2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -104,7 +104,7 @@ import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.plugins.AnalysisPlugin; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.RepositoryData; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/TaskRecoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/TaskRecoveryIT.java index 16905dc5b4d37..833bd3a8c9e3c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/TaskRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/TaskRecoveryIT.java @@ -16,7 +16,7 @@ import org.elasticsearch.index.engine.InternalEngine; import org.elasticsearch.plugins.EnginePlugin; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.tasks.TaskInfo; import org.elasticsearch.test.ESIntegTestCase; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/monitor/metrics/NodeIndexingMetricsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/monitor/metrics/NodeIndexingMetricsIT.java index 7b26cc5edf1bc..ce0d7419e96e1 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/monitor/metrics/NodeIndexingMetricsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/monitor/metrics/NodeIndexingMetricsIT.java @@ -19,7 +19,7 @@ import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.core.TimeValue; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.telemetry.Measurement; import org.elasticsearch.telemetry.TestTelemetryPlugin; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/plugins/IndexFoldersDeletionListenerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/plugins/IndexFoldersDeletionListenerIT.java index 542a4cd2c4c92..2d1b829b2e2dd 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/plugins/IndexFoldersDeletionListenerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/plugins/IndexFoldersDeletionListenerIT.java @@ -21,6 +21,7 @@ import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.junit.annotations.TestLogging; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java b/server/src/internalClusterTest/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java index 44b6ef1d51ce0..a0bc6dd933756 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java @@ -12,7 +12,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.telemetry.InstrumentType; import org.elasticsearch.telemetry.Measurement; import org.elasticsearch.telemetry.TestTelemetryPlugin; diff --git a/server/src/main/java/module-info.java b/server/src/main/java/module-info.java index db7e3d40518ba..4138036cbcea1 100644 --- a/server/src/main/java/module-info.java +++ b/server/src/main/java/module-info.java @@ -460,4 +460,5 @@ org.elasticsearch.serverless.shardhealth, org.elasticsearch.serverless.apifiltering; exports org.elasticsearch.lucene.spatial; + exports org.elasticsearch.plugins.loading to org.elasticsearch.deprecation, org.elasticsearch.plugins.cli; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java index f906b7d659b7b..559bf803ef62a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java @@ -24,8 +24,8 @@ import org.elasticsearch.common.settings.KeyStoreWrapper; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; -import org.elasticsearch.plugins.PluginsService; import 
org.elasticsearch.plugins.ReloadablePlugin; +import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/bootstrap/Security.java b/server/src/main/java/org/elasticsearch/bootstrap/Security.java index 12edf344c72a2..7b05e974f18dc 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/Security.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/Security.java @@ -19,7 +19,7 @@ import org.elasticsearch.jdk.JarHell; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; -import org.elasticsearch.plugins.PluginsUtils; +import org.elasticsearch.plugins.loading.PluginsUtils; import org.elasticsearch.secure_sm.SecureSM; import org.elasticsearch.transport.TcpTransport; diff --git a/server/src/main/java/org/elasticsearch/bootstrap/Spawner.java b/server/src/main/java/org/elasticsearch/bootstrap/Spawner.java index 2d37da1d10245..0910b75563dbc 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/Spawner.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/Spawner.java @@ -16,7 +16,7 @@ import org.elasticsearch.nativeaccess.NativeAccess; import org.elasticsearch.plugins.Platforms; import org.elasticsearch.plugins.PluginDescriptor; -import org.elasticsearch.plugins.PluginsUtils; +import org.elasticsearch.plugins.loading.PluginsUtils; import java.io.BufferedReader; import java.io.Closeable; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamFactoryRetention.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamFactoryRetention.java index 5b96f92193e98..8f4b48aff6be5 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamFactoryRetention.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamFactoryRetention.java @@ -11,7 +11,7 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.loading.PluginsService; /** * Holds the factory retention configuration. 
Factory retention is the global retention configuration meant to be diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index d5f770ebb95fc..944f0967456e3 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -106,7 +106,7 @@ import org.elasticsearch.node.NodeRoleSettings; import org.elasticsearch.persistent.PersistentTasksClusterService; import org.elasticsearch.persistent.decider.EnableAssignmentDecider; -import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.readiness.ReadinessService; import org.elasticsearch.repositories.fs.FsRepository; import org.elasticsearch.rest.BaseRestHandler; diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index 199bbc54fa3d6..79e1334211a2d 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -133,7 +133,7 @@ import org.elasticsearch.node.Node; import org.elasticsearch.plugins.FieldPredicate; import org.elasticsearch.plugins.IndexStorePlugin; -import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.aggregations.support.ValuesSourceRegistry; diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesServiceBuilder.java b/server/src/main/java/org/elasticsearch/indices/IndicesServiceBuilder.java index d56cf3c2c1e1a..0ba62ccc7aa41 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesServiceBuilder.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesServiceBuilder.java @@ -29,7 +29,7 @@ import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.plugins.EnginePlugin; import org.elasticsearch.plugins.IndexStorePlugin; -import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.aggregations.support.ValuesSourceRegistry; import org.elasticsearch.search.internal.ShardSearchRequest; diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 11eb8760b2dbb..bbc0a4183c3b6 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -68,7 +68,7 @@ import org.elasticsearch.plugins.ClusterPlugin; import org.elasticsearch.plugins.MetadataUpgrader; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.readiness.ReadinessService; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.reservedstate.service.FileSettingsService; diff --git a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java index bcf8451e5fe54..8b6b0a6d9c992 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java +++ 
b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java @@ -156,7 +156,6 @@ import org.elasticsearch.plugins.NetworkPlugin; import org.elasticsearch.plugins.PersistentTaskPlugin; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.plugins.RecoveryPlannerPlugin; import org.elasticsearch.plugins.ReloadablePlugin; import org.elasticsearch.plugins.RepositoryPlugin; @@ -171,6 +170,7 @@ import org.elasticsearch.plugins.internal.ReloadAwarePlugin; import org.elasticsearch.plugins.internal.RestExtension; import org.elasticsearch.plugins.internal.SettingsExtension; +import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.readiness.ReadinessService; import org.elasticsearch.repositories.RepositoriesModule; import org.elasticsearch.repositories.RepositoriesService; diff --git a/server/src/main/java/org/elasticsearch/node/NodeService.java b/server/src/main/java/org/elasticsearch/node/NodeService.java index 059b05091a6ae..ee99a1bba19e1 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeService.java +++ b/server/src/main/java/org/elasticsearch/node/NodeService.java @@ -31,7 +31,7 @@ import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.ingest.IngestService; import org.elasticsearch.monitor.MonitorService; -import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.aggregations.support.AggregationUsageService; diff --git a/server/src/main/java/org/elasticsearch/node/NodeServiceProvider.java b/server/src/main/java/org/elasticsearch/node/NodeServiceProvider.java index 914dd51d0c6b2..9114f2da6f43c 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeServiceProvider.java +++ b/server/src/main/java/org/elasticsearch/node/NodeServiceProvider.java @@ -26,7 +26,7 @@ import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.recovery.RecoverySettings; -import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.readiness.ReadinessService; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptEngine; diff --git a/server/src/main/java/org/elasticsearch/plugins/internal/ReloadAwarePlugin.java b/server/src/main/java/org/elasticsearch/plugins/internal/ReloadAwarePlugin.java index 71228efe227c5..3463d3ec64498 100644 --- a/server/src/main/java/org/elasticsearch/plugins/internal/ReloadAwarePlugin.java +++ b/server/src/main/java/org/elasticsearch/plugins/internal/ReloadAwarePlugin.java @@ -10,6 +10,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.ReloadablePlugin; +import org.elasticsearch.plugins.loading.PluginsService; /** * A plugin that may receive a {@link ReloadablePlugin} in order to @@ -22,7 +23,7 @@ public interface ReloadAwarePlugin { * *
    This callback is in the form of an implementation of {@link ReloadablePlugin}, * but the implementation does not need to be a {@link org.elasticsearch.plugins.Plugin}, - * or be registered with {@link org.elasticsearch.plugins.PluginsService}. + * or be registered with {@link PluginsService}. * * @param reloadablePlugin A plugin that this plugin may be able to reload */ diff --git a/server/src/main/java/org/elasticsearch/plugins/ModuleSupport.java b/server/src/main/java/org/elasticsearch/plugins/loading/ModuleSupport.java similarity index 99% rename from server/src/main/java/org/elasticsearch/plugins/ModuleSupport.java rename to server/src/main/java/org/elasticsearch/plugins/loading/ModuleSupport.java index e5f0004431b0e..5072f21ec0552 100644 --- a/server/src/main/java/org/elasticsearch/plugins/ModuleSupport.java +++ b/server/src/main/java/org/elasticsearch/plugins/loading/ModuleSupport.java @@ -6,7 +6,7 @@ * Side Public License, v 1. */ -package org.elasticsearch.plugins; +package org.elasticsearch.plugins.loading; import org.elasticsearch.core.SuppressForbidden; diff --git a/server/src/main/java/org/elasticsearch/plugins/PluginBundle.java b/server/src/main/java/org/elasticsearch/plugins/loading/PluginBundle.java similarity index 96% rename from server/src/main/java/org/elasticsearch/plugins/PluginBundle.java rename to server/src/main/java/org/elasticsearch/plugins/loading/PluginBundle.java index 154ffce6ba05f..4c9c25a71d994 100644 --- a/server/src/main/java/org/elasticsearch/plugins/PluginBundle.java +++ b/server/src/main/java/org/elasticsearch/plugins/loading/PluginBundle.java @@ -6,7 +6,9 @@ * Side Public License, v 1. */ -package org.elasticsearch.plugins; +package org.elasticsearch.plugins.loading; + +import org.elasticsearch.plugins.PluginDescriptor; import java.io.IOException; import java.net.URL; diff --git a/server/src/main/java/org/elasticsearch/plugins/PluginIntrospector.java b/server/src/main/java/org/elasticsearch/plugins/loading/PluginIntrospector.java similarity index 87% rename from server/src/main/java/org/elasticsearch/plugins/PluginIntrospector.java rename to server/src/main/java/org/elasticsearch/plugins/loading/PluginIntrospector.java index 8b7e4faa1f226..e3fb9e9c2e07d 100644 --- a/server/src/main/java/org/elasticsearch/plugins/PluginIntrospector.java +++ b/server/src/main/java/org/elasticsearch/plugins/loading/PluginIntrospector.java @@ -6,9 +6,30 @@ * Side Public License, v 1. 
*/ -package org.elasticsearch.plugins; +package org.elasticsearch.plugins.loading; import org.elasticsearch.core.SuppressForbidden; +import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.plugins.AnalysisPlugin; +import org.elasticsearch.plugins.CircuitBreakerPlugin; +import org.elasticsearch.plugins.ClusterPlugin; +import org.elasticsearch.plugins.DiscoveryPlugin; +import org.elasticsearch.plugins.EnginePlugin; +import org.elasticsearch.plugins.ExtensiblePlugin; +import org.elasticsearch.plugins.HealthPlugin; +import org.elasticsearch.plugins.IndexStorePlugin; +import org.elasticsearch.plugins.IngestPlugin; +import org.elasticsearch.plugins.MapperPlugin; +import org.elasticsearch.plugins.NetworkPlugin; +import org.elasticsearch.plugins.PersistentTaskPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.RecoveryPlannerPlugin; +import org.elasticsearch.plugins.ReloadablePlugin; +import org.elasticsearch.plugins.RepositoryPlugin; +import org.elasticsearch.plugins.ScriptPlugin; +import org.elasticsearch.plugins.SearchPlugin; +import org.elasticsearch.plugins.ShutdownAwarePlugin; +import org.elasticsearch.plugins.SystemIndexPlugin; import java.lang.reflect.Method; import java.lang.reflect.Modifier; diff --git a/server/src/main/java/org/elasticsearch/plugins/PluginLoaderIndirection.java b/server/src/main/java/org/elasticsearch/plugins/loading/PluginLoaderIndirection.java similarity index 95% rename from server/src/main/java/org/elasticsearch/plugins/PluginLoaderIndirection.java rename to server/src/main/java/org/elasticsearch/plugins/loading/PluginLoaderIndirection.java index d4a703c163025..d0f41eb1cbaf2 100644 --- a/server/src/main/java/org/elasticsearch/plugins/PluginLoaderIndirection.java +++ b/server/src/main/java/org/elasticsearch/plugins/loading/PluginLoaderIndirection.java @@ -6,7 +6,7 @@ * Side Public License, v 1. */ -package org.elasticsearch.plugins; +package org.elasticsearch.plugins.loading; import org.elasticsearch.plugins.loader.ExtendedPluginsClassLoader; diff --git a/server/src/main/java/org/elasticsearch/plugins/PluginsService.java b/server/src/main/java/org/elasticsearch/plugins/loading/PluginsService.java similarity index 99% rename from server/src/main/java/org/elasticsearch/plugins/PluginsService.java rename to server/src/main/java/org/elasticsearch/plugins/loading/PluginsService.java index 038b03c5dd93f..2f6b6ed3ab0e1 100644 --- a/server/src/main/java/org/elasticsearch/plugins/PluginsService.java +++ b/server/src/main/java/org/elasticsearch/plugins/loading/PluginsService.java @@ -6,7 +6,7 @@ * Side Public License, v 1. 
*/ -package org.elasticsearch.plugins; +package org.elasticsearch.plugins.loading; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -28,6 +28,11 @@ import org.elasticsearch.jdk.JarHell; import org.elasticsearch.jdk.ModuleQualifiedExportsService; import org.elasticsearch.node.ReportingService; +import org.elasticsearch.plugins.ExtensiblePlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.PluginApiInfo; +import org.elasticsearch.plugins.PluginDescriptor; +import org.elasticsearch.plugins.PluginRuntimeInfo; import org.elasticsearch.plugins.scanners.StablePluginsRegistry; import org.elasticsearch.plugins.spi.SPIClassIterator; diff --git a/server/src/main/java/org/elasticsearch/plugins/PluginsUtils.java b/server/src/main/java/org/elasticsearch/plugins/loading/PluginsUtils.java similarity index 99% rename from server/src/main/java/org/elasticsearch/plugins/PluginsUtils.java rename to server/src/main/java/org/elasticsearch/plugins/loading/PluginsUtils.java index becc5ef05e846..8c90e8abc1818 100644 --- a/server/src/main/java/org/elasticsearch/plugins/PluginsUtils.java +++ b/server/src/main/java/org/elasticsearch/plugins/loading/PluginsUtils.java @@ -6,7 +6,7 @@ * Side Public License, v 1. */ -package org.elasticsearch.plugins; +package org.elasticsearch.plugins.loading; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -14,6 +14,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.jdk.JarHell; +import org.elasticsearch.plugins.PluginDescriptor; import java.io.IOException; import java.net.URL; diff --git a/server/src/main/java/org/elasticsearch/plugins/StablePluginPlaceHolder.java b/server/src/main/java/org/elasticsearch/plugins/loading/StablePluginPlaceHolder.java similarity index 85% rename from server/src/main/java/org/elasticsearch/plugins/StablePluginPlaceHolder.java rename to server/src/main/java/org/elasticsearch/plugins/loading/StablePluginPlaceHolder.java index c1bc8fcfd12b7..09bff29977686 100644 --- a/server/src/main/java/org/elasticsearch/plugins/StablePluginPlaceHolder.java +++ b/server/src/main/java/org/elasticsearch/plugins/loading/StablePluginPlaceHolder.java @@ -6,7 +6,9 @@ * Side Public License, v 1. */ -package org.elasticsearch.plugins; +package org.elasticsearch.plugins.loading; + +import org.elasticsearch.plugins.Plugin; class StablePluginPlaceHolder extends Plugin { private final String name; diff --git a/server/src/main/java/org/elasticsearch/plugins/UberModuleClassLoader.java b/server/src/main/java/org/elasticsearch/plugins/loading/UberModuleClassLoader.java similarity index 99% rename from server/src/main/java/org/elasticsearch/plugins/UberModuleClassLoader.java rename to server/src/main/java/org/elasticsearch/plugins/loading/UberModuleClassLoader.java index dca3afb2ed745..6e0d33f7081af 100644 --- a/server/src/main/java/org/elasticsearch/plugins/UberModuleClassLoader.java +++ b/server/src/main/java/org/elasticsearch/plugins/loading/UberModuleClassLoader.java @@ -6,7 +6,7 @@ * Side Public License, v 1. 
*/ -package org.elasticsearch.plugins; +package org.elasticsearch.plugins.loading; import org.elasticsearch.core.SuppressForbidden; diff --git a/server/src/main/java/org/elasticsearch/plugins/scanners/NamedComponentReader.java b/server/src/main/java/org/elasticsearch/plugins/scanners/NamedComponentReader.java index ed32bd245977a..96ddba9f2a175 100644 --- a/server/src/main/java/org/elasticsearch/plugins/scanners/NamedComponentReader.java +++ b/server/src/main/java/org/elasticsearch/plugins/scanners/NamedComponentReader.java @@ -11,7 +11,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.core.Strings; -import org.elasticsearch.plugins.PluginBundle; +import org.elasticsearch.plugins.loading.PluginBundle; import org.elasticsearch.xcontent.XContentParserConfiguration; import java.io.BufferedInputStream; diff --git a/server/src/main/java/org/elasticsearch/plugins/scanners/StablePluginsRegistry.java b/server/src/main/java/org/elasticsearch/plugins/scanners/StablePluginsRegistry.java index 6e2780a08251f..5c56f040a75a3 100644 --- a/server/src/main/java/org/elasticsearch/plugins/scanners/StablePluginsRegistry.java +++ b/server/src/main/java/org/elasticsearch/plugins/scanners/StablePluginsRegistry.java @@ -8,7 +8,7 @@ package org.elasticsearch.plugins.scanners; -import org.elasticsearch.plugins.PluginBundle; +import org.elasticsearch.plugins.loading.PluginBundle; import java.util.Collection; import java.util.Collections; diff --git a/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerTelemetryTests.java b/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerTelemetryTests.java index 2cbe1202520df..4e94827380a6e 100644 --- a/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerTelemetryTests.java +++ b/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerTelemetryTests.java @@ -13,7 +13,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.telemetry.Measurement; import org.elasticsearch.telemetry.RecordingInstruments; diff --git a/server/src/test/java/org/elasticsearch/node/NodeTests.java b/server/src/test/java/org/elasticsearch/node/NodeTests.java index b36cafd694378..dcee2f3cf9a8b 100644 --- a/server/src/test/java/org/elasticsearch/node/NodeTests.java +++ b/server/src/test/java/org/elasticsearch/node/NodeTests.java @@ -36,8 +36,8 @@ import org.elasticsearch.plugins.CircuitBreakerPlugin; import org.elasticsearch.plugins.ClusterCoordinationPlugin; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.PluginsServiceTests; import org.elasticsearch.plugins.RecoveryPlannerPlugin; +import org.elasticsearch.plugins.loading.PluginsServiceTests; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; diff --git a/server/src/test/java/org/elasticsearch/plugins/PluginDescriptorTests.java b/server/src/test/java/org/elasticsearch/plugins/PluginDescriptorTests.java index 7ed4d975fe3be..bc14f87f5b580 100644 --- a/server/src/test/java/org/elasticsearch/plugins/PluginDescriptorTests.java +++ b/server/src/test/java/org/elasticsearch/plugins/PluginDescriptorTests.java @@ -12,6 +12,8 @@ import 
org.elasticsearch.action.admin.cluster.node.info.PluginsAndModules; import org.elasticsearch.common.io.stream.ByteBufferStreamInput; import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.plugins.loading.PluginTestUtil; +import org.elasticsearch.plugins.loading.PluginsUtils; import org.elasticsearch.test.ESTestCase; import java.io.IOException; diff --git a/server/src/test/java/org/elasticsearch/plugins/internal/ReloadAwarePluginTests.java b/server/src/test/java/org/elasticsearch/plugins/internal/ReloadAwarePluginTests.java index 2401ad25193a2..4553a65944d30 100644 --- a/server/src/test/java/org/elasticsearch/plugins/internal/ReloadAwarePluginTests.java +++ b/server/src/test/java/org/elasticsearch/plugins/internal/ReloadAwarePluginTests.java @@ -11,8 +11,8 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.node.MockNode; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.plugins.ReloadablePlugin; +import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.transport.netty4.Netty4Plugin; diff --git a/server/src/test/java/org/elasticsearch/plugins/PluginIntrospectorTests.java b/server/src/test/java/org/elasticsearch/plugins/loading/PluginIntrospectorTests.java similarity index 93% rename from server/src/test/java/org/elasticsearch/plugins/PluginIntrospectorTests.java rename to server/src/test/java/org/elasticsearch/plugins/loading/PluginIntrospectorTests.java index 5e80b6d217a55..d341ecd72273d 100644 --- a/server/src/test/java/org/elasticsearch/plugins/PluginIntrospectorTests.java +++ b/server/src/test/java/org/elasticsearch/plugins/loading/PluginIntrospectorTests.java @@ -6,7 +6,7 @@ * Side Public License, v 1. 
*/ -package org.elasticsearch.plugins; +package org.elasticsearch.plugins.loading; import org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata; import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator; @@ -22,6 +22,27 @@ import org.elasticsearch.indices.recovery.plan.RecoveryPlannerService; import org.elasticsearch.indices.recovery.plan.ShardSnapshotsService; import org.elasticsearch.ingest.Processor; +import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.plugins.AnalysisPlugin; +import org.elasticsearch.plugins.CircuitBreakerPlugin; +import org.elasticsearch.plugins.ClusterPlugin; +import org.elasticsearch.plugins.DiscoveryPlugin; +import org.elasticsearch.plugins.EnginePlugin; +import org.elasticsearch.plugins.ExtensiblePlugin; +import org.elasticsearch.plugins.HealthPlugin; +import org.elasticsearch.plugins.IndexStorePlugin; +import org.elasticsearch.plugins.IngestPlugin; +import org.elasticsearch.plugins.MapperPlugin; +import org.elasticsearch.plugins.NetworkPlugin; +import org.elasticsearch.plugins.PersistentTaskPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.RecoveryPlannerPlugin; +import org.elasticsearch.plugins.ReloadablePlugin; +import org.elasticsearch.plugins.RepositoryPlugin; +import org.elasticsearch.plugins.ScriptPlugin; +import org.elasticsearch.plugins.SearchPlugin; +import org.elasticsearch.plugins.ShutdownAwarePlugin; +import org.elasticsearch.plugins.SystemIndexPlugin; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.PrivilegedOperations; import org.elasticsearch.test.compiler.InMemoryJavaCompiler; diff --git a/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java b/server/src/test/java/org/elasticsearch/plugins/loading/PluginsServiceTests.java similarity index 98% rename from server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java rename to server/src/test/java/org/elasticsearch/plugins/loading/PluginsServiceTests.java index 28ebed88e8f3c..6e4984e2dcf6d 100644 --- a/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/plugins/loading/PluginsServiceTests.java @@ -6,7 +6,7 @@ * Side Public License, v 1. 
*/ -package org.elasticsearch.plugins; +package org.elasticsearch.plugins.loading; import org.apache.lucene.tests.util.LuceneTestCase; import org.apache.lucene.util.Constants; @@ -19,6 +19,11 @@ import org.elasticsearch.index.IndexModule; import org.elasticsearch.jdk.ModuleQualifiedExportsService; import org.elasticsearch.plugin.analysis.CharFilterFactory; +import org.elasticsearch.plugins.ExtensiblePlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.PluginDescriptor; +import org.elasticsearch.plugins.PluginRuntimeInfo; +import org.elasticsearch.plugins.ScriptPlugin; import org.elasticsearch.plugins.scanners.PluginInfo; import org.elasticsearch.plugins.spi.BarPlugin; import org.elasticsearch.plugins.spi.BarTestService; @@ -313,18 +318,18 @@ public void testNonExtensibleDep() throws Exception { public void testPassingMandatoryPluginCheck() { PluginsService.checkMandatoryPlugins( - Set.of("org.elasticsearch.plugins.PluginsServiceTests$FakePlugin"), - Set.of("org.elasticsearch.plugins.PluginsServiceTests$FakePlugin") + Set.of("org.elasticsearch.plugins.loading.PluginsServiceTests$FakePlugin"), + Set.of("org.elasticsearch.plugins.loading.PluginsServiceTests$FakePlugin") ); } public void testFailingMandatoryPluginCheck() { IllegalStateException e = expectThrows( IllegalStateException.class, - () -> PluginsService.checkMandatoryPlugins(Set.of(), Set.of("org.elasticsearch.plugins.PluginsServiceTests$FakePlugin")) + () -> PluginsService.checkMandatoryPlugins(Set.of(), Set.of("org.elasticsearch.plugins.loading.PluginsServiceTests$FakePlugin")) ); assertEquals( - "missing mandatory plugins [org.elasticsearch.plugins.PluginsServiceTests$FakePlugin], found plugins []", + "missing mandatory plugins [org.elasticsearch.plugins.loading.PluginsServiceTests$FakePlugin], found plugins []", e.getMessage() ); } diff --git a/server/src/test/java/org/elasticsearch/plugins/PluginsUtilsTests.java b/server/src/test/java/org/elasticsearch/plugins/loading/PluginsUtilsTests.java similarity index 99% rename from server/src/test/java/org/elasticsearch/plugins/PluginsUtilsTests.java rename to server/src/test/java/org/elasticsearch/plugins/loading/PluginsUtilsTests.java index a7cc74582afdc..f44de9ac1c704 100644 --- a/server/src/test/java/org/elasticsearch/plugins/PluginsUtilsTests.java +++ b/server/src/test/java/org/elasticsearch/plugins/loading/PluginsUtilsTests.java @@ -6,7 +6,7 @@ * Side Public License, v 1. */ -package org.elasticsearch.plugins; +package org.elasticsearch.plugins.loading; import org.apache.logging.log4j.Level; import org.apache.lucene.tests.util.LuceneTestCase; @@ -14,6 +14,7 @@ import org.elasticsearch.Version; import org.elasticsearch.core.PathUtils; import org.elasticsearch.jdk.JarHell; +import org.elasticsearch.plugins.PluginDescriptor; import org.elasticsearch.test.ESTestCase; import org.hamcrest.Matchers; diff --git a/server/src/test/java/org/elasticsearch/plugins/UberModuleClassLoaderTests.java b/server/src/test/java/org/elasticsearch/plugins/loading/UberModuleClassLoaderTests.java similarity index 99% rename from server/src/test/java/org/elasticsearch/plugins/UberModuleClassLoaderTests.java rename to server/src/test/java/org/elasticsearch/plugins/loading/UberModuleClassLoaderTests.java index e3cd11c8f3b68..6a12009acce41 100644 --- a/server/src/test/java/org/elasticsearch/plugins/UberModuleClassLoaderTests.java +++ b/server/src/test/java/org/elasticsearch/plugins/loading/UberModuleClassLoaderTests.java @@ -6,7 +6,7 @@ * Side Public License, v 1. 
*/ -package org.elasticsearch.plugins; +package org.elasticsearch.plugins.loading; import org.elasticsearch.common.Strings; import org.elasticsearch.test.ESTestCase; diff --git a/server/src/test/java/org/elasticsearch/plugins/scanners/StablePluginsRegistryTests.java b/server/src/test/java/org/elasticsearch/plugins/scanners/StablePluginsRegistryTests.java index 276cbdbc1a452..b37da49034f78 100644 --- a/server/src/test/java/org/elasticsearch/plugins/scanners/StablePluginsRegistryTests.java +++ b/server/src/test/java/org/elasticsearch/plugins/scanners/StablePluginsRegistryTests.java @@ -8,7 +8,7 @@ package org.elasticsearch.plugins.scanners; -import org.elasticsearch.plugins.PluginBundle; +import org.elasticsearch.plugins.loading.PluginBundle; import org.elasticsearch.test.ESTestCase; import org.mockito.Mockito; diff --git a/server/src/test/java/org/elasticsearch/search/TelemetryMetrics/SearchTookTimeTelemetryTests.java b/server/src/test/java/org/elasticsearch/search/TelemetryMetrics/SearchTookTimeTelemetryTests.java index 850af7f85f76a..5dad16c5be397 100644 --- a/server/src/test/java/org/elasticsearch/search/TelemetryMetrics/SearchTookTimeTelemetryTests.java +++ b/server/src/test/java/org/elasticsearch/search/TelemetryMetrics/SearchTookTimeTelemetryTests.java @@ -12,7 +12,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.telemetry.Measurement; import org.elasticsearch.telemetry.TestTelemetryPlugin; import org.elasticsearch.test.ESSingleNodeTestCase; diff --git a/server/src/test/java/org/elasticsearch/search/TelemetryMetrics/SearchTransportTelemetryTests.java b/server/src/test/java/org/elasticsearch/search/TelemetryMetrics/SearchTransportTelemetryTests.java index c7fc11e81483f..5edb7147ea259 100644 --- a/server/src/test/java/org/elasticsearch/search/TelemetryMetrics/SearchTransportTelemetryTests.java +++ b/server/src/test/java/org/elasticsearch/search/TelemetryMetrics/SearchTransportTelemetryTests.java @@ -13,7 +13,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.telemetry.Measurement; import org.elasticsearch.telemetry.TestTelemetryPlugin; import org.elasticsearch.test.ESSingleNodeTestCase; diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java index f4aa44f143c40..3100bdd6db529 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java @@ -162,8 +162,8 @@ import org.elasticsearch.ingest.IngestService; import org.elasticsearch.monitor.StatusInfo; import org.elasticsearch.node.ResponseCollectorService; -import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.plugins.internal.DocumentParsingProvider; +import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.plugins.scanners.StablePluginsRegistry; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.Repository; diff --git 
a/server/src/test/resources/META-INF/services/org.elasticsearch.plugins.PluginsServiceTests$TestExtensionPoint b/server/src/test/resources/META-INF/services/org.elasticsearch.plugins.loading.PluginsServiceTests$TestExtensionPoint similarity index 71% rename from server/src/test/resources/META-INF/services/org.elasticsearch.plugins.PluginsServiceTests$TestExtensionPoint rename to server/src/test/resources/META-INF/services/org.elasticsearch.plugins.loading.PluginsServiceTests$TestExtensionPoint index 65ca6fbcac9a5..20654d4496c23 100644 --- a/server/src/test/resources/META-INF/services/org.elasticsearch.plugins.PluginsServiceTests$TestExtensionPoint +++ b/server/src/test/resources/META-INF/services/org.elasticsearch.plugins.loading.PluginsServiceTests$TestExtensionPoint @@ -6,5 +6,5 @@ # Side Public License, v 1. # -org.elasticsearch.plugins.PluginsServiceTests$TestExtension1 -org.elasticsearch.plugins.PluginsServiceTests$TestExtension2 +org.elasticsearch.plugins.loading.PluginsServiceTests$TestExtension1 +org.elasticsearch.plugins.loading.PluginsServiceTests$TestExtension2 diff --git a/server/src/test/resources/org/elasticsearch/plugins/dummy-plugin.jar b/server/src/test/resources/org/elasticsearch/plugins/loading/dummy-plugin.jar similarity index 100% rename from server/src/test/resources/org/elasticsearch/plugins/dummy-plugin.jar rename to server/src/test/resources/org/elasticsearch/plugins/loading/dummy-plugin.jar diff --git a/server/src/test/resources/org/elasticsearch/plugins/non-extensible-plugin.jar b/server/src/test/resources/org/elasticsearch/plugins/loading/non-extensible-plugin.jar similarity index 100% rename from server/src/test/resources/org/elasticsearch/plugins/non-extensible-plugin.jar rename to server/src/test/resources/org/elasticsearch/plugins/loading/non-extensible-plugin.jar diff --git a/test/framework/src/main/java/org/elasticsearch/action/support/CancellableActionTestPlugin.java b/test/framework/src/main/java/org/elasticsearch/action/support/CancellableActionTestPlugin.java index dad0e3b613efb..d46e95aaef328 100644 --- a/test/framework/src/main/java/org/elasticsearch/action/support/CancellableActionTestPlugin.java +++ b/test/framework/src/main/java/org/elasticsearch/action/support/CancellableActionTestPlugin.java @@ -14,7 +14,7 @@ import org.elasticsearch.core.Releasable; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskCancelledException; diff --git a/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java b/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java index 8ef80c08517de..1b3ef22db7a86 100644 --- a/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java +++ b/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java @@ -215,7 +215,7 @@ public boolean implies(ProtectionDomain domain, Permission permission) { static Map getCodebases() { Map codebases = PolicyUtil.getCodebaseJarMap(JarHell.parseClassPath()); // when testing server, the main elasticsearch code is not yet in a jar, so we need to manually add it - addClassCodebase(codebases, "elasticsearch", "org.elasticsearch.plugins.PluginsService"); + addClassCodebase(codebases, "elasticsearch", 
"org.elasticsearch.plugins.loading.PluginsService"); addClassCodebase(codebases, "elasticsearch-plugin-classloader", "org.elasticsearch.plugins.loader.ExtendedPluginsClassLoader"); addClassCodebase(codebases, "elasticsearch-nio", "org.elasticsearch.nio.ChannelFactory"); addClassCodebase(codebases, "elasticsearch-secure-sm", "org.elasticsearch.secure_sm.SecureSM"); diff --git a/test/framework/src/main/java/org/elasticsearch/node/MockNode.java b/test/framework/src/main/java/org/elasticsearch/node/MockNode.java index 520aff77497ba..a348be75d0449 100644 --- a/test/framework/src/main/java/org/elasticsearch/node/MockNode.java +++ b/test/framework/src/main/java/org/elasticsearch/node/MockNode.java @@ -28,9 +28,9 @@ import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.recovery.RecoverySettings; -import org.elasticsearch.plugins.MockPluginsService; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.loading.MockPluginsService; +import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.readiness.MockReadinessService; import org.elasticsearch.readiness.ReadinessService; import org.elasticsearch.script.MockScriptService; diff --git a/test/framework/src/main/java/org/elasticsearch/plugins/MockPluginsService.java b/test/framework/src/main/java/org/elasticsearch/plugins/loading/MockPluginsService.java similarity index 97% rename from test/framework/src/main/java/org/elasticsearch/plugins/MockPluginsService.java rename to test/framework/src/main/java/org/elasticsearch/plugins/loading/MockPluginsService.java index 9d8e7dedcc06d..a393953ca2f99 100644 --- a/test/framework/src/main/java/org/elasticsearch/plugins/MockPluginsService.java +++ b/test/framework/src/main/java/org/elasticsearch/plugins/loading/MockPluginsService.java @@ -6,7 +6,7 @@ * Side Public License, v 1. */ -package org.elasticsearch.plugins; +package org.elasticsearch.plugins.loading; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -16,6 +16,9 @@ import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.env.Environment; import org.elasticsearch.jdk.ModuleQualifiedExportsService; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.PluginDescriptor; +import org.elasticsearch.plugins.PluginRuntimeInfo; import org.elasticsearch.plugins.spi.SPIClassIterator; import java.lang.reflect.Constructor; diff --git a/test/framework/src/main/java/org/elasticsearch/plugins/PluginTestUtil.java b/test/framework/src/main/java/org/elasticsearch/plugins/loading/PluginTestUtil.java similarity index 96% rename from test/framework/src/main/java/org/elasticsearch/plugins/PluginTestUtil.java rename to test/framework/src/main/java/org/elasticsearch/plugins/loading/PluginTestUtil.java index 7edb46d3e3786..4418208a10272 100644 --- a/test/framework/src/main/java/org/elasticsearch/plugins/PluginTestUtil.java +++ b/test/framework/src/main/java/org/elasticsearch/plugins/loading/PluginTestUtil.java @@ -6,9 +6,10 @@ * Side Public License, v 1. 
*/ -package org.elasticsearch.plugins; +package org.elasticsearch.plugins.loading; import org.elasticsearch.Version; +import org.elasticsearch.plugins.PluginDescriptor; import java.io.IOException; import java.io.OutputStream; diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java index 271df2a971fb1..42332500a83a8 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java @@ -65,11 +65,11 @@ import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache; import org.elasticsearch.node.InternalSettingsPreparer; import org.elasticsearch.plugins.MapperPlugin; -import org.elasticsearch.plugins.MockPluginsService; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.plugins.ScriptPlugin; import org.elasticsearch.plugins.SearchPlugin; +import org.elasticsearch.plugins.loading.MockPluginsService; +import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.plugins.scanners.StablePluginsRegistry; import org.elasticsearch.script.MockScriptEngine; import org.elasticsearch.script.MockScriptService; diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractSearchCancellationTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractSearchCancellationTestCase.java index 5dc707e94bdd7..c4683bacc75c6 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractSearchCancellationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractSearchCancellationTestCase.java @@ -21,7 +21,7 @@ import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.shard.SearchOperationListener; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.script.MockScriptPlugin; import org.elasticsearch.search.SearchService; diff --git a/test/framework/src/test/java/org/elasticsearch/plugins/MockPluginsServiceTests.java b/test/framework/src/test/java/org/elasticsearch/plugins/MockPluginsServiceTests.java index 055e56db3fb33..b8f8cecd3615d 100644 --- a/test/framework/src/test/java/org/elasticsearch/plugins/MockPluginsServiceTests.java +++ b/test/framework/src/test/java/org/elasticsearch/plugins/MockPluginsServiceTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.plugins.loading.MockPluginsService; import org.elasticsearch.test.ESTestCase; import org.junit.Before; diff --git a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/action/GetAutoscalingCapacityRestCancellationIT.java b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/action/GetAutoscalingCapacityRestCancellationIT.java index 5b2803c8f4186..e14ebcd0930be 100644 --- a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/action/GetAutoscalingCapacityRestCancellationIT.java +++ b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/action/GetAutoscalingCapacityRestCancellationIT.java @@ -16,7 +16,7 @@ import 
org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.test.ESIntegTestCase; diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/TransportNodeDeprecationCheckAction.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/TransportNodeDeprecationCheckAction.java index ba72be655a7ff..19b306868738a 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/TransportNodeDeprecationCheckAction.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/TransportNodeDeprecationCheckAction.java @@ -26,7 +26,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.license.XPackLicenseState; -import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/TransportNodeDeprecationCheckActionTests.java b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/TransportNodeDeprecationCheckActionTests.java index 80692efb7474a..a153b40f730f0 100644 --- a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/TransportNodeDeprecationCheckActionTests.java +++ b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/TransportNodeDeprecationCheckActionTests.java @@ -20,7 +20,7 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.license.XPackLicenseState; -import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; diff --git a/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java b/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java index 80bb0368a1afc..44367b7de51ea 100644 --- a/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java +++ b/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java @@ -57,7 +57,7 @@ import org.elasticsearch.indices.IndicesService; import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchResponseUtils; diff --git a/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/AbstractEqlBlockingIntegTestCase.java b/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/AbstractEqlBlockingIntegTestCase.java index 
414705aff0b79..efa19266a38fd 100644 --- a/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/AbstractEqlBlockingIntegTestCase.java +++ b/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/AbstractEqlBlockingIntegTestCase.java @@ -22,7 +22,7 @@ import org.elasticsearch.index.shard.SearchOperationListener; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.search.internal.ReaderContext; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; diff --git a/x-pack/plugin/monitoring/src/internalClusterTest/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterIT.java b/x-pack/plugin/monitoring/src/internalClusterTest/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterIT.java index 5250a1f764e5c..ef4f179bb93a2 100644 --- a/x-pack/plugin/monitoring/src/internalClusterTest/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterIT.java +++ b/x-pack/plugin/monitoring/src/internalClusterTest/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterIT.java @@ -29,7 +29,7 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.license.TestUtils; -import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.rest.RestUtils; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.Scope; diff --git a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/action/CancellationIT.java b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/action/CancellationIT.java index 183ef3786a62d..dc71c8fd46f79 100644 --- a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/action/CancellationIT.java +++ b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/action/CancellationIT.java @@ -19,7 +19,7 @@ import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.script.MockScriptPlugin; import org.elasticsearch.search.lookup.LeafStoredFieldsLookup; import org.elasticsearch.tasks.CancellableTask; diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/full/SearchableSnapshotsPrewarmingIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/full/SearchableSnapshotsPrewarmingIntegTests.java index 42542b63c80d1..ece12dd4d1167 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/full/SearchableSnapshotsPrewarmingIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/full/SearchableSnapshotsPrewarmingIntegTests.java @@ -38,8 +38,8 @@ import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.license.LicenseSettings; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.PluginsService; import 
org.elasticsearch.plugins.RepositoryPlugin; +import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.RepositoriesMetrics; import org.elasticsearch.repositories.RepositoriesService; diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmSingleNodeTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmSingleNodeTests.java index 2ced54a513146..9aa0201d6b4cc 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmSingleNodeTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmSingleNodeTests.java @@ -31,7 +31,7 @@ import org.elasticsearch.core.Strings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.test.SecuritySettingsSource; import org.elasticsearch.test.SecuritySingleNodeTestCase; import org.elasticsearch.test.junit.annotations.TestLogging; diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/ProfileCancellationIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/ProfileCancellationIntegTests.java index 87a5146113f72..5e004b9c048e5 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/ProfileCancellationIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/ProfileCancellationIntegTests.java @@ -24,7 +24,7 @@ import org.elasticsearch.index.shard.SearchOperationListener; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.search.internal.ReaderContext; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.tasks.CancellableTask; diff --git a/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/AbstractSqlBlockingIntegTestCase.java b/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/AbstractSqlBlockingIntegTestCase.java index 7ad54901e2d06..a562c379b301d 100644 --- a/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/AbstractSqlBlockingIntegTestCase.java +++ b/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/AbstractSqlBlockingIntegTestCase.java @@ -22,7 +22,7 @@ import org.elasticsearch.license.LicenseSettings; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.search.internal.ReaderContext; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; diff --git a/x-pack/plugin/write-load-forecaster/src/internalClusterTest/java/org/elasticsearch/xpack/writeloadforecaster/WriteLoadForecasterIT.java b/x-pack/plugin/write-load-forecaster/src/internalClusterTest/java/org/elasticsearch/xpack/writeloadforecaster/WriteLoadForecasterIT.java index cb93725b320d1..4b6e31ae7105f 100644 --- 
a/x-pack/plugin/write-load-forecaster/src/internalClusterTest/java/org/elasticsearch/xpack/writeloadforecaster/WriteLoadForecasterIT.java +++ b/x-pack/plugin/write-load-forecaster/src/internalClusterTest/java/org/elasticsearch/xpack/writeloadforecaster/WriteLoadForecasterIT.java @@ -32,7 +32,7 @@ import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.shard.IndexingStats; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.xcontent.XContentType; import org.junit.Before; From 43b2e877e0b937d491745081479e6a1b2edf93cc Mon Sep 17 00:00:00 2001 From: Patrick Doyle <810052+prdoyle@users.noreply.github.com> Date: Wed, 19 Jun 2024 18:10:50 -0400 Subject: [PATCH 44/44] Revert "Move PluginsService to its own internal package (#109872)" (#109946) This reverts commit b9e7965184efb6da6b06de56821db13911803c3a. --- .../script/ScriptScoreBenchmark.java | 2 +- .../resources/checkstyle_suppressions.xml | 2 +- .../plugin-cli/src/main/java/module-info.java | 25 ------------------ .../plugins/cli/InstallPluginAction.java | 2 +- .../plugins/cli/RemovePluginAction.java | 2 +- .../plugins/cli/InstallPluginActionTests.java | 2 +- .../plugins/cli/ListPluginsCommandTests.java | 2 +- .../plugins/cli/RemovePluginActionTests.java | 2 +- .../plugins/cli/SyncPluginsActionTests.java | 2 +- .../datastreams/DataStreamAutoshardingIT.java | 2 +- .../AbstractFeatureMigrationIntegTest.java | 2 +- .../s3/S3BlobStoreRepositoryMetricsTests.java | 2 +- .../s3/S3BlobStoreRepositoryTests.java | 2 +- .../s3/S3RepositoryThirdPartyTests.java | 2 +- .../s3/RepositoryCredentialsTests.java | 2 +- .../bootstrap/SpawnerNoBootstrapTests.java | 2 +- .../http/HealthRestCancellationIT.java | 2 +- .../http/SearchRestCancellationIT.java | 2 +- .../action/IndicesRequestIT.java | 2 +- .../action/admin/ReloadSecureSettingsIT.java | 2 +- .../admin/cluster/tasks/ListTasksIT.java | 2 +- ...tReplicationActionRetryOnClosedNodeIT.java | 2 +- .../coordination/RestHandlerNodesIT.java | 2 +- .../cluster/routing/ShardRoutingRoleIT.java | 2 +- .../indices/recovery/IndexRecoveryIT.java | 2 +- .../indices/recovery/TaskRecoveryIT.java | 2 +- .../metrics/NodeIndexingMetricsIT.java | 2 +- .../IndexFoldersDeletionListenerIT.java | 1 - .../threadpool/SimpleThreadPoolIT.java | 2 +- server/src/main/java/module-info.java | 1 - ...nsportNodesReloadSecureSettingsAction.java | 2 +- .../org/elasticsearch/bootstrap/Security.java | 2 +- .../org/elasticsearch/bootstrap/Spawner.java | 2 +- .../metadata/DataStreamFactoryRetention.java | 2 +- .../common/settings/ClusterSettings.java | 2 +- .../elasticsearch/indices/IndicesService.java | 2 +- .../indices/IndicesServiceBuilder.java | 2 +- .../java/org/elasticsearch/node/Node.java | 2 +- .../elasticsearch/node/NodeConstruction.java | 2 +- .../org/elasticsearch/node/NodeService.java | 2 +- .../node/NodeServiceProvider.java | 2 +- .../plugins/{loading => }/ModuleSupport.java | 2 +- .../plugins/{loading => }/PluginBundle.java | 4 +-- .../{loading => }/PluginIntrospector.java | 23 +--------------- .../PluginLoaderIndirection.java | 2 +- .../plugins/{loading => }/PluginsService.java | 7 +---- .../plugins/{loading => }/PluginsUtils.java | 3 +-- .../StablePluginPlaceHolder.java | 4 +-- .../{loading => }/UberModuleClassLoader.java | 2 +- .../plugins/internal/ReloadAwarePlugin.java | 3 +-- .../scanners/NamedComponentReader.java | 2 +- 
.../scanners/StablePluginsRegistry.java | 2 +- ...HierarchyCircuitBreakerTelemetryTests.java | 2 +- .../org/elasticsearch/node/NodeTests.java | 2 +- .../plugins/PluginDescriptorTests.java | 2 -- .../PluginIntrospectorTests.java | 23 +--------------- .../{loading => }/PluginsServiceTests.java | 15 ++++------- .../{loading => }/PluginsUtilsTests.java | 3 +-- .../UberModuleClassLoaderTests.java | 2 +- .../internal/ReloadAwarePluginTests.java | 2 +- .../scanners/StablePluginsRegistryTests.java | 2 +- .../SearchTookTimeTelemetryTests.java | 2 +- .../SearchTransportTelemetryTests.java | 2 +- .../snapshots/SnapshotResiliencyTests.java | 2 +- ...ns.PluginsServiceTests$TestExtensionPoint} | 4 +-- .../plugins/{loading => }/dummy-plugin.jar | Bin .../{loading => }/non-extensible-plugin.jar | Bin .../support/CancellableActionTestPlugin.java | 2 +- .../bootstrap/BootstrapForTesting.java | 2 +- .../java/org/elasticsearch/node/MockNode.java | 4 +-- .../{loading => }/MockPluginsService.java | 5 +--- .../plugins/{loading => }/PluginTestUtil.java | 3 +-- .../test/AbstractBuilderTestCase.java | 4 +-- .../AbstractSearchCancellationTestCase.java | 2 +- .../plugins/MockPluginsServiceTests.java | 1 - ...AutoscalingCapacityRestCancellationIT.java | 2 +- .../TransportNodeDeprecationCheckAction.java | 2 +- ...nsportNodeDeprecationCheckActionTests.java | 2 +- .../DownsampleActionSingleNodeTests.java | 2 +- .../AbstractEqlBlockingIntegTestCase.java | 2 +- .../exporter/http/HttpExporterIT.java | 2 +- .../profiling/action/CancellationIT.java | 2 +- ...archableSnapshotsPrewarmingIntegTests.java | 2 +- .../authc/jwt/JwtRealmSingleNodeTests.java | 2 +- .../ProfileCancellationIntegTests.java | 2 +- .../AbstractSqlBlockingIntegTestCase.java | 2 +- .../WriteLoadForecasterIT.java | 2 +- 87 files changed, 87 insertions(+), 180 deletions(-) delete mode 100644 distribution/tools/plugin-cli/src/main/java/module-info.java rename server/src/main/java/org/elasticsearch/plugins/{loading => }/ModuleSupport.java (99%) rename server/src/main/java/org/elasticsearch/plugins/{loading => }/PluginBundle.java (96%) rename server/src/main/java/org/elasticsearch/plugins/{loading => }/PluginIntrospector.java (87%) rename server/src/main/java/org/elasticsearch/plugins/{loading => }/PluginLoaderIndirection.java (95%) rename server/src/main/java/org/elasticsearch/plugins/{loading => }/PluginsService.java (99%) rename server/src/main/java/org/elasticsearch/plugins/{loading => }/PluginsUtils.java (99%) rename server/src/main/java/org/elasticsearch/plugins/{loading => }/StablePluginPlaceHolder.java (85%) rename server/src/main/java/org/elasticsearch/plugins/{loading => }/UberModuleClassLoader.java (99%) rename server/src/test/java/org/elasticsearch/plugins/{loading => }/PluginIntrospectorTests.java (93%) rename server/src/test/java/org/elasticsearch/plugins/{loading => }/PluginsServiceTests.java (98%) rename server/src/test/java/org/elasticsearch/plugins/{loading => }/PluginsUtilsTests.java (99%) rename server/src/test/java/org/elasticsearch/plugins/{loading => }/UberModuleClassLoaderTests.java (99%) rename server/src/test/resources/META-INF/services/{org.elasticsearch.plugins.loading.PluginsServiceTests$TestExtensionPoint => org.elasticsearch.plugins.PluginsServiceTests$TestExtensionPoint} (71%) rename server/src/test/resources/org/elasticsearch/plugins/{loading => }/dummy-plugin.jar (100%) rename server/src/test/resources/org/elasticsearch/plugins/{loading => }/non-extensible-plugin.jar (100%) rename 
test/framework/src/main/java/org/elasticsearch/plugins/{loading => }/MockPluginsService.java (97%) rename test/framework/src/main/java/org/elasticsearch/plugins/{loading => }/PluginTestUtil.java (96%) diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/script/ScriptScoreBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/script/ScriptScoreBenchmark.java index fb502302be8f0..5a27abe8be2a4 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/script/ScriptScoreBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/script/ScriptScoreBenchmark.java @@ -33,8 +33,8 @@ import org.elasticsearch.index.mapper.NumberFieldMapper.NumberType; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.plugins.ScriptPlugin; -import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.script.DocReader; import org.elasticsearch.script.DocValuesDocReader; import org.elasticsearch.script.ScoreScript; diff --git a/build-tools-internal/src/main/resources/checkstyle_suppressions.xml b/build-tools-internal/src/main/resources/checkstyle_suppressions.xml index de5a8aea34980..fd01993951959 100644 --- a/build-tools-internal/src/main/resources/checkstyle_suppressions.xml +++ b/build-tools-internal/src/main/resources/checkstyle_suppressions.xml @@ -20,7 +20,7 @@ - + diff --git a/distribution/tools/plugin-cli/src/main/java/module-info.java b/distribution/tools/plugin-cli/src/main/java/module-info.java deleted file mode 100644 index 4898125790e37..0000000000000 --- a/distribution/tools/plugin-cli/src/main/java/module-info.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
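
The module-info.java being deleted in this hunk (its directives continue just below) is what had put the plugin CLI on the module path; removing it drops the tool back onto the class path. For readers less familiar with JPMS descriptors, a minimal sketch of the shape involved, using hypothetical org.example names rather than the real directives that follow:

    // module-info.java -- illustrative sketch only; module and class names are hypothetical
    module org.example.cli {
        requires org.example.server;                  // a dependency edge on another named module
        provides org.example.spi.ToolProvider         // service interface this module implements
            with org.example.cli.DefaultToolProvider; // implementation surfaced via ServiceLoader
    }
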
- */ - -import org.elasticsearch.cli.CliToolProvider; - -module org.elasticsearch.plugins.cli { - requires jopt.simple; - requires org.apache.lucene.core; - requires org.apache.lucene.suggest; - requires org.bouncycastle.fips.core; - requires org.bouncycastle.pg; - requires org.elasticsearch.base; - requires org.elasticsearch.cli; - requires org.elasticsearch.plugin.scanner; - requires org.elasticsearch.server; - requires org.elasticsearch.xcontent; - requires org.objectweb.asm; - - provides CliToolProvider with org.elasticsearch.plugins.cli.PluginCliProvider, org.elasticsearch.plugins.cli.SyncPluginsCliProvider; -} diff --git a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/InstallPluginAction.java b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/InstallPluginAction.java index 9bfe000feb81e..c7bee4a6c172d 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/InstallPluginAction.java +++ b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/InstallPluginAction.java @@ -41,7 +41,7 @@ import org.elasticsearch.plugin.scanner.NamedComponentScanner; import org.elasticsearch.plugins.Platforms; import org.elasticsearch.plugins.PluginDescriptor; -import org.elasticsearch.plugins.loading.PluginsUtils; +import org.elasticsearch.plugins.PluginsUtils; import org.objectweb.asm.ClassReader; import java.io.BufferedReader; diff --git a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/RemovePluginAction.java b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/RemovePluginAction.java index 6d19641fb372a..4714ef202b258 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/RemovePluginAction.java +++ b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/RemovePluginAction.java @@ -13,7 +13,7 @@ import org.elasticsearch.cli.UserException; import org.elasticsearch.core.IOUtils; import org.elasticsearch.env.Environment; -import org.elasticsearch.plugins.loading.PluginsUtils; +import org.elasticsearch.plugins.PluginsUtils; import java.io.IOException; import java.nio.file.FileAlreadyExistsException; diff --git a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/InstallPluginActionTests.java b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/InstallPluginActionTests.java index fe18507398e1c..3dc7af07d4d83 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/InstallPluginActionTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/InstallPluginActionTests.java @@ -50,7 +50,7 @@ import org.elasticsearch.plugin.scanner.NamedComponentScanner; import org.elasticsearch.plugins.Platforms; import org.elasticsearch.plugins.PluginDescriptor; -import org.elasticsearch.plugins.loading.PluginTestUtil; +import org.elasticsearch.plugins.PluginTestUtil; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.PosixPermissionsResetter; import org.elasticsearch.test.compiler.InMemoryJavaCompiler; diff --git a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/ListPluginsCommandTests.java b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/ListPluginsCommandTests.java index cec6b2c7881b1..b225bc441794a 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/ListPluginsCommandTests.java +++ 
b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/ListPluginsCommandTests.java @@ -18,7 +18,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; -import org.elasticsearch.plugins.loading.PluginTestUtil; +import org.elasticsearch.plugins.PluginTestUtil; import org.junit.Before; import java.io.IOException; diff --git a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/RemovePluginActionTests.java b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/RemovePluginActionTests.java index 9d9300155e9cb..73e89fc948029 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/RemovePluginActionTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/RemovePluginActionTests.java @@ -17,7 +17,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; -import org.elasticsearch.plugins.loading.PluginTestUtil; +import org.elasticsearch.plugins.PluginTestUtil; import org.elasticsearch.test.ESTestCase; import org.junit.Before; diff --git a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/SyncPluginsActionTests.java b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/SyncPluginsActionTests.java index 1e9002a7db392..9802b4039bb7b 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/SyncPluginsActionTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/SyncPluginsActionTests.java @@ -14,8 +14,8 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.plugins.PluginTestUtil; import org.elasticsearch.plugins.cli.SyncPluginsAction.PluginChanges; -import org.elasticsearch.plugins.loading.PluginTestUtil; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; import org.hamcrest.Matchers; diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamAutoshardingIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamAutoshardingIT.java index 7bc0192b723ce..a4c9a9d3e1c67 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamAutoshardingIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamAutoshardingIT.java @@ -51,7 +51,7 @@ import org.elasticsearch.index.shard.ShardPath; import org.elasticsearch.index.store.StoreStats; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.loading.PluginsService; +import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.telemetry.InstrumentType; import org.elasticsearch.telemetry.Measurement; diff --git a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/AbstractFeatureMigrationIntegTest.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/AbstractFeatureMigrationIntegTest.java index f7e07a82e00fb..3bbc8e4b969ee 100644 --- a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/AbstractFeatureMigrationIntegTest.java +++ 
b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/AbstractFeatureMigrationIntegTest.java @@ -28,8 +28,8 @@ import org.elasticsearch.indices.AssociatedIndexDescriptor; import org.elasticsearch.indices.SystemIndexDescriptor; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.plugins.SystemIndexPlugin; -import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.index.IndexVersionUtils; import org.elasticsearch.xcontent.XContentBuilder; diff --git a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryMetricsTests.java b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryMetricsTests.java index d88cf885ef921..f8503bca3ec67 100644 --- a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryMetricsTests.java +++ b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryMetricsTests.java @@ -20,7 +20,7 @@ import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.SuppressForbidden; -import org.elasticsearch.plugins.loading.PluginsService; +import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; import org.elasticsearch.repositories.s3.S3BlobStore.Operation; diff --git a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java index 3b9eff683dbbf..88f0e01db3e6a 100644 --- a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -38,7 +38,7 @@ import org.elasticsearch.index.IndexVersion; import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.loading.PluginsService; +import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.RepositoryData; diff --git a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java index 8ff9fb478c402..5064910723ab6 100644 --- a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java +++ b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java @@ -30,7 +30,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.loading.PluginsService; +import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.repositories.AbstractThirdPartyRepositoryTestCase; import org.elasticsearch.repositories.RepositoriesService; 
import org.elasticsearch.repositories.blobstore.RequestedRangeNotSatisfiedException; diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java index bd7827be1cca4..9a1d12fab0af5 100644 --- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java @@ -21,7 +21,7 @@ import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.loading.PluginsService; +import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.rest.AbstractRestChannel; import org.elasticsearch.rest.RestRequest; diff --git a/qa/no-bootstrap-tests/src/test/java/org/elasticsearch/bootstrap/SpawnerNoBootstrapTests.java b/qa/no-bootstrap-tests/src/test/java/org/elasticsearch/bootstrap/SpawnerNoBootstrapTests.java index 96de064424312..6369e02e1f605 100644 --- a/qa/no-bootstrap-tests/src/test/java/org/elasticsearch/bootstrap/SpawnerNoBootstrapTests.java +++ b/qa/no-bootstrap-tests/src/test/java/org/elasticsearch/bootstrap/SpawnerNoBootstrapTests.java @@ -22,7 +22,7 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.plugins.Platforms; -import org.elasticsearch.plugins.loading.PluginTestUtil; +import org.elasticsearch.plugins.PluginTestUtil; import org.elasticsearch.test.GraalVMThreadsFilter; import org.elasticsearch.test.MockLog; diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/HealthRestCancellationIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/HealthRestCancellationIT.java index 6da08b71a6d58..dc298fc616506 100644 --- a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/HealthRestCancellationIT.java +++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/HealthRestCancellationIT.java @@ -24,7 +24,7 @@ import org.elasticsearch.health.node.HealthInfo; import org.elasticsearch.plugins.HealthPlugin; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.loading.PluginsService; +import org.elasticsearch.plugins.PluginsService; import java.util.ArrayList; import java.util.Collection; diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/SearchRestCancellationIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/SearchRestCancellationIT.java index 462be7eb85283..73dd1525f8a08 100644 --- a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/SearchRestCancellationIT.java +++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/SearchRestCancellationIT.java @@ -27,7 +27,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.loading.PluginsService; +import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.script.MockScriptPlugin; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java 
index 7969e401d1677..920677e8c4b4a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java @@ -73,7 +73,7 @@ import org.elasticsearch.indices.TestIndexNameExpressionResolver; import org.elasticsearch.plugins.NetworkPlugin; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.loading.PluginsService; +import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.script.MockScriptPlugin; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/ReloadSecureSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/ReloadSecureSettingsIT.java index 9eb603ad894d3..5d4a922ec3e11 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/ReloadSecureSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/ReloadSecureSettingsIT.java @@ -23,8 +23,8 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.plugins.ReloadablePlugin; -import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.test.ESIntegTestCase; import org.junit.BeforeClass; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/tasks/ListTasksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/tasks/ListTasksIT.java index 0b0344187c8c6..4a076cb3b6e66 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/tasks/ListTasksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/tasks/ListTasksIT.java @@ -21,7 +21,7 @@ import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.loading.PluginsService; +import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.threadpool.ThreadPool; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/support/replication/TransportReplicationActionRetryOnClosedNodeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/support/replication/TransportReplicationActionRetryOnClosedNodeIT.java index 29abeb8badf90..b89cea7dff089 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/support/replication/TransportReplicationActionRetryOnClosedNodeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/support/replication/TransportReplicationActionRetryOnClosedNodeIT.java @@ -27,7 +27,7 @@ import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.NetworkPlugin; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.loading.PluginsService; +import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.ThreadPool; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RestHandlerNodesIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RestHandlerNodesIT.java index 08ae42c57fff1..5a7f4609a7d0f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RestHandlerNodesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RestHandlerNodesIT.java @@ -18,7 +18,7 @@ import org.elasticsearch.features.NodeFeature; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.loading.PluginsService; +import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestHandler; import org.elasticsearch.test.ESIntegTestCase; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java index 8aacf625e82cf..bb9324dd7d10c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java @@ -48,7 +48,7 @@ import org.elasticsearch.plugins.ClusterPlugin; import org.elasticsearch.plugins.EnginePlugin; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.loading.PluginsService; +import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.search.builder.PointInTimeBuilder; import org.elasticsearch.snapshots.SnapshotState; import org.elasticsearch.test.ESIntegTestCase; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index ea7df1cacf3e2..4f15b82ca1f16 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -104,7 +104,7 @@ import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.plugins.AnalysisPlugin; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.loading.PluginsService; +import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.RepositoryData; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/TaskRecoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/TaskRecoveryIT.java index 833bd3a8c9e3c..16905dc5b4d37 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/TaskRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/TaskRecoveryIT.java @@ -16,7 +16,7 @@ import org.elasticsearch.index.engine.InternalEngine; import org.elasticsearch.plugins.EnginePlugin; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.loading.PluginsService; +import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.tasks.TaskInfo; import org.elasticsearch.test.ESIntegTestCase; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/monitor/metrics/NodeIndexingMetricsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/monitor/metrics/NodeIndexingMetricsIT.java index ce0d7419e96e1..7b26cc5edf1bc 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/monitor/metrics/NodeIndexingMetricsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/monitor/metrics/NodeIndexingMetricsIT.java @@ -19,7 +19,7 @@ import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.core.TimeValue; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.loading.PluginsService; +import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.telemetry.Measurement; import org.elasticsearch.telemetry.TestTelemetryPlugin; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/plugins/IndexFoldersDeletionListenerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/plugins/IndexFoldersDeletionListenerIT.java index 2d1b829b2e2dd..542a4cd2c4c92 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/plugins/IndexFoldersDeletionListenerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/plugins/IndexFoldersDeletionListenerIT.java @@ -21,7 +21,6 @@ import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.junit.annotations.TestLogging; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java b/server/src/internalClusterTest/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java index a0bc6dd933756..44b6ef1d51ce0 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java @@ -12,7 +12,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.loading.PluginsService; +import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.telemetry.InstrumentType; import org.elasticsearch.telemetry.Measurement; import org.elasticsearch.telemetry.TestTelemetryPlugin; diff --git a/server/src/main/java/module-info.java b/server/src/main/java/module-info.java index 4138036cbcea1..db7e3d40518ba 100644 --- a/server/src/main/java/module-info.java +++ b/server/src/main/java/module-info.java @@ -460,5 +460,4 @@ org.elasticsearch.serverless.shardhealth, org.elasticsearch.serverless.apifiltering; exports org.elasticsearch.lucene.spatial; - exports org.elasticsearch.plugins.loading to org.elasticsearch.deprecation, org.elasticsearch.plugins.cli; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java index 559bf803ef62a..f906b7d659b7b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java @@ -24,8 +24,8 @@ import org.elasticsearch.common.settings.KeyStoreWrapper; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; +import org.elasticsearch.plugins.PluginsService; import 
org.elasticsearch.plugins.ReloadablePlugin; -import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/bootstrap/Security.java b/server/src/main/java/org/elasticsearch/bootstrap/Security.java index 7b05e974f18dc..12edf344c72a2 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/Security.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/Security.java @@ -19,7 +19,7 @@ import org.elasticsearch.jdk.JarHell; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; -import org.elasticsearch.plugins.loading.PluginsUtils; +import org.elasticsearch.plugins.PluginsUtils; import org.elasticsearch.secure_sm.SecureSM; import org.elasticsearch.transport.TcpTransport; diff --git a/server/src/main/java/org/elasticsearch/bootstrap/Spawner.java b/server/src/main/java/org/elasticsearch/bootstrap/Spawner.java index 0910b75563dbc..2d37da1d10245 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/Spawner.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/Spawner.java @@ -16,7 +16,7 @@ import org.elasticsearch.nativeaccess.NativeAccess; import org.elasticsearch.plugins.Platforms; import org.elasticsearch.plugins.PluginDescriptor; -import org.elasticsearch.plugins.loading.PluginsUtils; +import org.elasticsearch.plugins.PluginsUtils; import java.io.BufferedReader; import java.io.Closeable; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamFactoryRetention.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamFactoryRetention.java index 8f4b48aff6be5..5b96f92193e98 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamFactoryRetention.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamFactoryRetention.java @@ -11,7 +11,7 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.plugins.loading.PluginsService; +import org.elasticsearch.plugins.PluginsService; /** * Holds the factory retention configuration. 
Factory retention is the global retention configuration meant to be diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index 944f0967456e3..d5f770ebb95fc 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -106,7 +106,7 @@ import org.elasticsearch.node.NodeRoleSettings; import org.elasticsearch.persistent.PersistentTasksClusterService; import org.elasticsearch.persistent.decider.EnableAssignmentDecider; -import org.elasticsearch.plugins.loading.PluginsService; +import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.readiness.ReadinessService; import org.elasticsearch.repositories.fs.FsRepository; import org.elasticsearch.rest.BaseRestHandler; diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index 79e1334211a2d..199bbc54fa3d6 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -133,7 +133,7 @@ import org.elasticsearch.node.Node; import org.elasticsearch.plugins.FieldPredicate; import org.elasticsearch.plugins.IndexStorePlugin; -import org.elasticsearch.plugins.loading.PluginsService; +import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.aggregations.support.ValuesSourceRegistry; diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesServiceBuilder.java b/server/src/main/java/org/elasticsearch/indices/IndicesServiceBuilder.java index 0ba62ccc7aa41..d56cf3c2c1e1a 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesServiceBuilder.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesServiceBuilder.java @@ -29,7 +29,7 @@ import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.plugins.EnginePlugin; import org.elasticsearch.plugins.IndexStorePlugin; -import org.elasticsearch.plugins.loading.PluginsService; +import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.aggregations.support.ValuesSourceRegistry; import org.elasticsearch.search.internal.ShardSearchRequest; diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index bbc0a4183c3b6..11eb8760b2dbb 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -68,7 +68,7 @@ import org.elasticsearch.plugins.ClusterPlugin; import org.elasticsearch.plugins.MetadataUpgrader; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.loading.PluginsService; +import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.readiness.ReadinessService; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.reservedstate.service.FileSettingsService; diff --git a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java index 8b6b0a6d9c992..bcf8451e5fe54 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java +++ 
b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java @@ -156,6 +156,7 @@ import org.elasticsearch.plugins.NetworkPlugin; import org.elasticsearch.plugins.PersistentTaskPlugin; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.plugins.RecoveryPlannerPlugin; import org.elasticsearch.plugins.ReloadablePlugin; import org.elasticsearch.plugins.RepositoryPlugin; @@ -170,7 +171,6 @@ import org.elasticsearch.plugins.internal.ReloadAwarePlugin; import org.elasticsearch.plugins.internal.RestExtension; import org.elasticsearch.plugins.internal.SettingsExtension; -import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.readiness.ReadinessService; import org.elasticsearch.repositories.RepositoriesModule; import org.elasticsearch.repositories.RepositoriesService; diff --git a/server/src/main/java/org/elasticsearch/node/NodeService.java b/server/src/main/java/org/elasticsearch/node/NodeService.java index ee99a1bba19e1..059b05091a6ae 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeService.java +++ b/server/src/main/java/org/elasticsearch/node/NodeService.java @@ -31,7 +31,7 @@ import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.ingest.IngestService; import org.elasticsearch.monitor.MonitorService; -import org.elasticsearch.plugins.loading.PluginsService; +import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.aggregations.support.AggregationUsageService; diff --git a/server/src/main/java/org/elasticsearch/node/NodeServiceProvider.java b/server/src/main/java/org/elasticsearch/node/NodeServiceProvider.java index 9114f2da6f43c..914dd51d0c6b2 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeServiceProvider.java +++ b/server/src/main/java/org/elasticsearch/node/NodeServiceProvider.java @@ -26,7 +26,7 @@ import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.recovery.RecoverySettings; -import org.elasticsearch.plugins.loading.PluginsService; +import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.readiness.ReadinessService; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptEngine; diff --git a/server/src/main/java/org/elasticsearch/plugins/loading/ModuleSupport.java b/server/src/main/java/org/elasticsearch/plugins/ModuleSupport.java similarity index 99% rename from server/src/main/java/org/elasticsearch/plugins/loading/ModuleSupport.java rename to server/src/main/java/org/elasticsearch/plugins/ModuleSupport.java index 5072f21ec0552..e5f0004431b0e 100644 --- a/server/src/main/java/org/elasticsearch/plugins/loading/ModuleSupport.java +++ b/server/src/main/java/org/elasticsearch/plugins/ModuleSupport.java @@ -6,7 +6,7 @@ * Side Public License, v 1. 
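
Alongside these package renames, the revert also removed the qualified export from the server module descriptor (the "- exports org.elasticsearch.plugins.loading to org.elasticsearch.deprecation, org.elasticsearch.plugins.cli;" line in the module-info.java hunk above). A qualified export is what had kept the relocated package visible only to its two friend modules; schematically, with the surrounding module body elided and only the exports directive taken from the patch:

    // Sketch of the removed directive in context; everything but the exports line is elided/illustrative.
    module org.elasticsearch.server {
        exports org.elasticsearch.plugins.loading
            to org.elasticsearch.deprecation, org.elasticsearch.plugins.cli;
    }
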
*/ -package org.elasticsearch.plugins.loading; +package org.elasticsearch.plugins; import org.elasticsearch.core.SuppressForbidden; diff --git a/server/src/main/java/org/elasticsearch/plugins/loading/PluginBundle.java b/server/src/main/java/org/elasticsearch/plugins/PluginBundle.java similarity index 96% rename from server/src/main/java/org/elasticsearch/plugins/loading/PluginBundle.java rename to server/src/main/java/org/elasticsearch/plugins/PluginBundle.java index 4c9c25a71d994..154ffce6ba05f 100644 --- a/server/src/main/java/org/elasticsearch/plugins/loading/PluginBundle.java +++ b/server/src/main/java/org/elasticsearch/plugins/PluginBundle.java @@ -6,9 +6,7 @@ * Side Public License, v 1. */ -package org.elasticsearch.plugins.loading; - -import org.elasticsearch.plugins.PluginDescriptor; +package org.elasticsearch.plugins; import java.io.IOException; import java.net.URL; diff --git a/server/src/main/java/org/elasticsearch/plugins/loading/PluginIntrospector.java b/server/src/main/java/org/elasticsearch/plugins/PluginIntrospector.java similarity index 87% rename from server/src/main/java/org/elasticsearch/plugins/loading/PluginIntrospector.java rename to server/src/main/java/org/elasticsearch/plugins/PluginIntrospector.java index e3fb9e9c2e07d..8b7e4faa1f226 100644 --- a/server/src/main/java/org/elasticsearch/plugins/loading/PluginIntrospector.java +++ b/server/src/main/java/org/elasticsearch/plugins/PluginIntrospector.java @@ -6,30 +6,9 @@ * Side Public License, v 1. */ -package org.elasticsearch.plugins.loading; +package org.elasticsearch.plugins; import org.elasticsearch.core.SuppressForbidden; -import org.elasticsearch.plugins.ActionPlugin; -import org.elasticsearch.plugins.AnalysisPlugin; -import org.elasticsearch.plugins.CircuitBreakerPlugin; -import org.elasticsearch.plugins.ClusterPlugin; -import org.elasticsearch.plugins.DiscoveryPlugin; -import org.elasticsearch.plugins.EnginePlugin; -import org.elasticsearch.plugins.ExtensiblePlugin; -import org.elasticsearch.plugins.HealthPlugin; -import org.elasticsearch.plugins.IndexStorePlugin; -import org.elasticsearch.plugins.IngestPlugin; -import org.elasticsearch.plugins.MapperPlugin; -import org.elasticsearch.plugins.NetworkPlugin; -import org.elasticsearch.plugins.PersistentTaskPlugin; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.RecoveryPlannerPlugin; -import org.elasticsearch.plugins.ReloadablePlugin; -import org.elasticsearch.plugins.RepositoryPlugin; -import org.elasticsearch.plugins.ScriptPlugin; -import org.elasticsearch.plugins.SearchPlugin; -import org.elasticsearch.plugins.ShutdownAwarePlugin; -import org.elasticsearch.plugins.SystemIndexPlugin; import java.lang.reflect.Method; import java.lang.reflect.Modifier; diff --git a/server/src/main/java/org/elasticsearch/plugins/loading/PluginLoaderIndirection.java b/server/src/main/java/org/elasticsearch/plugins/PluginLoaderIndirection.java similarity index 95% rename from server/src/main/java/org/elasticsearch/plugins/loading/PluginLoaderIndirection.java rename to server/src/main/java/org/elasticsearch/plugins/PluginLoaderIndirection.java index d0f41eb1cbaf2..d4a703c163025 100644 --- a/server/src/main/java/org/elasticsearch/plugins/loading/PluginLoaderIndirection.java +++ b/server/src/main/java/org/elasticsearch/plugins/PluginLoaderIndirection.java @@ -6,7 +6,7 @@ * Side Public License, v 1. 
*/ -package org.elasticsearch.plugins.loading; +package org.elasticsearch.plugins; import org.elasticsearch.plugins.loader.ExtendedPluginsClassLoader; diff --git a/server/src/main/java/org/elasticsearch/plugins/loading/PluginsService.java b/server/src/main/java/org/elasticsearch/plugins/PluginsService.java similarity index 99% rename from server/src/main/java/org/elasticsearch/plugins/loading/PluginsService.java rename to server/src/main/java/org/elasticsearch/plugins/PluginsService.java index 2f6b6ed3ab0e1..038b03c5dd93f 100644 --- a/server/src/main/java/org/elasticsearch/plugins/loading/PluginsService.java +++ b/server/src/main/java/org/elasticsearch/plugins/PluginsService.java @@ -6,7 +6,7 @@ * Side Public License, v 1. */ -package org.elasticsearch.plugins.loading; +package org.elasticsearch.plugins; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -28,11 +28,6 @@ import org.elasticsearch.jdk.JarHell; import org.elasticsearch.jdk.ModuleQualifiedExportsService; import org.elasticsearch.node.ReportingService; -import org.elasticsearch.plugins.ExtensiblePlugin; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.PluginApiInfo; -import org.elasticsearch.plugins.PluginDescriptor; -import org.elasticsearch.plugins.PluginRuntimeInfo; import org.elasticsearch.plugins.scanners.StablePluginsRegistry; import org.elasticsearch.plugins.spi.SPIClassIterator; diff --git a/server/src/main/java/org/elasticsearch/plugins/loading/PluginsUtils.java b/server/src/main/java/org/elasticsearch/plugins/PluginsUtils.java similarity index 99% rename from server/src/main/java/org/elasticsearch/plugins/loading/PluginsUtils.java rename to server/src/main/java/org/elasticsearch/plugins/PluginsUtils.java index 8c90e8abc1818..becc5ef05e846 100644 --- a/server/src/main/java/org/elasticsearch/plugins/loading/PluginsUtils.java +++ b/server/src/main/java/org/elasticsearch/plugins/PluginsUtils.java @@ -6,7 +6,7 @@ * Side Public License, v 1. */ -package org.elasticsearch.plugins.loading; +package org.elasticsearch.plugins; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -14,7 +14,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.jdk.JarHell; -import org.elasticsearch.plugins.PluginDescriptor; import java.io.IOException; import java.net.URL; diff --git a/server/src/main/java/org/elasticsearch/plugins/loading/StablePluginPlaceHolder.java b/server/src/main/java/org/elasticsearch/plugins/StablePluginPlaceHolder.java similarity index 85% rename from server/src/main/java/org/elasticsearch/plugins/loading/StablePluginPlaceHolder.java rename to server/src/main/java/org/elasticsearch/plugins/StablePluginPlaceHolder.java index 09bff29977686..c1bc8fcfd12b7 100644 --- a/server/src/main/java/org/elasticsearch/plugins/loading/StablePluginPlaceHolder.java +++ b/server/src/main/java/org/elasticsearch/plugins/StablePluginPlaceHolder.java @@ -6,9 +6,7 @@ * Side Public License, v 1. 
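
A few hunks further down, the javadoc of ReloadAwarePlugin is retargeted at the restored package. For context, the callback contract that javadoc describes can be exercised as in the sketch below; this assumes ReloadablePlugin remains a single-method interface (as its lambda-friendly description suggests), and ReloadCallbackSketch is a hypothetical class, not Elasticsearch code:

    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.plugins.ReloadablePlugin;

    public final class ReloadCallbackSketch {
        public static void main(String[] args) throws Exception {
            // The callback handed to a ReloadAwarePlugin is just a ReloadablePlugin;
            // it need not itself be a Plugin or be registered with PluginsService.
            ReloadablePlugin callback = (Settings settings) -> System.out.println("reloading with " + settings.keySet());
            callback.reload(Settings.EMPTY);
        }
    }
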
*/ -package org.elasticsearch.plugins.loading; - -import org.elasticsearch.plugins.Plugin; +package org.elasticsearch.plugins; class StablePluginPlaceHolder extends Plugin { private final String name; diff --git a/server/src/main/java/org/elasticsearch/plugins/loading/UberModuleClassLoader.java b/server/src/main/java/org/elasticsearch/plugins/UberModuleClassLoader.java similarity index 99% rename from server/src/main/java/org/elasticsearch/plugins/loading/UberModuleClassLoader.java rename to server/src/main/java/org/elasticsearch/plugins/UberModuleClassLoader.java index 6e0d33f7081af..dca3afb2ed745 100644 --- a/server/src/main/java/org/elasticsearch/plugins/loading/UberModuleClassLoader.java +++ b/server/src/main/java/org/elasticsearch/plugins/UberModuleClassLoader.java @@ -6,7 +6,7 @@ * Side Public License, v 1. */ -package org.elasticsearch.plugins.loading; +package org.elasticsearch.plugins; import org.elasticsearch.core.SuppressForbidden; diff --git a/server/src/main/java/org/elasticsearch/plugins/internal/ReloadAwarePlugin.java b/server/src/main/java/org/elasticsearch/plugins/internal/ReloadAwarePlugin.java index 3463d3ec64498..71228efe227c5 100644 --- a/server/src/main/java/org/elasticsearch/plugins/internal/ReloadAwarePlugin.java +++ b/server/src/main/java/org/elasticsearch/plugins/internal/ReloadAwarePlugin.java @@ -10,7 +10,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.ReloadablePlugin; -import org.elasticsearch.plugins.loading.PluginsService; /** * A plugin that may receive a {@link ReloadablePlugin} in order to @@ -23,7 +22,7 @@ public interface ReloadAwarePlugin { * *
<p>
    This callback is in the form of an implementation of {@link ReloadablePlugin}, * but the implementation does not need to be a {@link org.elasticsearch.plugins.Plugin}, - * or be registered with {@link PluginsService}. + * or be registered with {@link org.elasticsearch.plugins.PluginsService}. * * @param reloadablePlugin A plugin that this plugin may be able to reload */ diff --git a/server/src/main/java/org/elasticsearch/plugins/scanners/NamedComponentReader.java b/server/src/main/java/org/elasticsearch/plugins/scanners/NamedComponentReader.java index 96ddba9f2a175..ed32bd245977a 100644 --- a/server/src/main/java/org/elasticsearch/plugins/scanners/NamedComponentReader.java +++ b/server/src/main/java/org/elasticsearch/plugins/scanners/NamedComponentReader.java @@ -11,7 +11,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.core.Strings; -import org.elasticsearch.plugins.loading.PluginBundle; +import org.elasticsearch.plugins.PluginBundle; import org.elasticsearch.xcontent.XContentParserConfiguration; import java.io.BufferedInputStream; diff --git a/server/src/main/java/org/elasticsearch/plugins/scanners/StablePluginsRegistry.java b/server/src/main/java/org/elasticsearch/plugins/scanners/StablePluginsRegistry.java index 5c56f040a75a3..6e2780a08251f 100644 --- a/server/src/main/java/org/elasticsearch/plugins/scanners/StablePluginsRegistry.java +++ b/server/src/main/java/org/elasticsearch/plugins/scanners/StablePluginsRegistry.java @@ -8,7 +8,7 @@ package org.elasticsearch.plugins.scanners; -import org.elasticsearch.plugins.loading.PluginBundle; +import org.elasticsearch.plugins.PluginBundle; import java.util.Collection; import java.util.Collections; diff --git a/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerTelemetryTests.java b/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerTelemetryTests.java index 4e94827380a6e..2cbe1202520df 100644 --- a/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerTelemetryTests.java +++ b/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerTelemetryTests.java @@ -13,7 +13,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.loading.PluginsService; +import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.telemetry.Measurement; import org.elasticsearch.telemetry.RecordingInstruments; diff --git a/server/src/test/java/org/elasticsearch/node/NodeTests.java b/server/src/test/java/org/elasticsearch/node/NodeTests.java index dcee2f3cf9a8b..b36cafd694378 100644 --- a/server/src/test/java/org/elasticsearch/node/NodeTests.java +++ b/server/src/test/java/org/elasticsearch/node/NodeTests.java @@ -36,8 +36,8 @@ import org.elasticsearch.plugins.CircuitBreakerPlugin; import org.elasticsearch.plugins.ClusterCoordinationPlugin; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.PluginsServiceTests; import org.elasticsearch.plugins.RecoveryPlannerPlugin; -import org.elasticsearch.plugins.loading.PluginsServiceTests; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; diff --git a/server/src/test/java/org/elasticsearch/plugins/PluginDescriptorTests.java 
b/server/src/test/java/org/elasticsearch/plugins/PluginDescriptorTests.java index bc14f87f5b580..7ed4d975fe3be 100644 --- a/server/src/test/java/org/elasticsearch/plugins/PluginDescriptorTests.java +++ b/server/src/test/java/org/elasticsearch/plugins/PluginDescriptorTests.java @@ -12,8 +12,6 @@ import org.elasticsearch.action.admin.cluster.node.info.PluginsAndModules; import org.elasticsearch.common.io.stream.ByteBufferStreamInput; import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.plugins.loading.PluginTestUtil; -import org.elasticsearch.plugins.loading.PluginsUtils; import org.elasticsearch.test.ESTestCase; import java.io.IOException; diff --git a/server/src/test/java/org/elasticsearch/plugins/loading/PluginIntrospectorTests.java b/server/src/test/java/org/elasticsearch/plugins/PluginIntrospectorTests.java similarity index 93% rename from server/src/test/java/org/elasticsearch/plugins/loading/PluginIntrospectorTests.java rename to server/src/test/java/org/elasticsearch/plugins/PluginIntrospectorTests.java index d341ecd72273d..5e80b6d217a55 100644 --- a/server/src/test/java/org/elasticsearch/plugins/loading/PluginIntrospectorTests.java +++ b/server/src/test/java/org/elasticsearch/plugins/PluginIntrospectorTests.java @@ -6,7 +6,7 @@ * Side Public License, v 1. */ -package org.elasticsearch.plugins.loading; +package org.elasticsearch.plugins; import org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata; import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator; @@ -22,27 +22,6 @@ import org.elasticsearch.indices.recovery.plan.RecoveryPlannerService; import org.elasticsearch.indices.recovery.plan.ShardSnapshotsService; import org.elasticsearch.ingest.Processor; -import org.elasticsearch.plugins.ActionPlugin; -import org.elasticsearch.plugins.AnalysisPlugin; -import org.elasticsearch.plugins.CircuitBreakerPlugin; -import org.elasticsearch.plugins.ClusterPlugin; -import org.elasticsearch.plugins.DiscoveryPlugin; -import org.elasticsearch.plugins.EnginePlugin; -import org.elasticsearch.plugins.ExtensiblePlugin; -import org.elasticsearch.plugins.HealthPlugin; -import org.elasticsearch.plugins.IndexStorePlugin; -import org.elasticsearch.plugins.IngestPlugin; -import org.elasticsearch.plugins.MapperPlugin; -import org.elasticsearch.plugins.NetworkPlugin; -import org.elasticsearch.plugins.PersistentTaskPlugin; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.RecoveryPlannerPlugin; -import org.elasticsearch.plugins.ReloadablePlugin; -import org.elasticsearch.plugins.RepositoryPlugin; -import org.elasticsearch.plugins.ScriptPlugin; -import org.elasticsearch.plugins.SearchPlugin; -import org.elasticsearch.plugins.ShutdownAwarePlugin; -import org.elasticsearch.plugins.SystemIndexPlugin; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.PrivilegedOperations; import org.elasticsearch.test.compiler.InMemoryJavaCompiler; diff --git a/server/src/test/java/org/elasticsearch/plugins/loading/PluginsServiceTests.java b/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java similarity index 98% rename from server/src/test/java/org/elasticsearch/plugins/loading/PluginsServiceTests.java rename to server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java index 6e4984e2dcf6d..28ebed88e8f3c 100644 --- a/server/src/test/java/org/elasticsearch/plugins/loading/PluginsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java @@ -6,7 +6,7 @@ * 
Side Public License, v 1. */ -package org.elasticsearch.plugins.loading; +package org.elasticsearch.plugins; import org.apache.lucene.tests.util.LuceneTestCase; import org.apache.lucene.util.Constants; @@ -19,11 +19,6 @@ import org.elasticsearch.index.IndexModule; import org.elasticsearch.jdk.ModuleQualifiedExportsService; import org.elasticsearch.plugin.analysis.CharFilterFactory; -import org.elasticsearch.plugins.ExtensiblePlugin; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.PluginDescriptor; -import org.elasticsearch.plugins.PluginRuntimeInfo; -import org.elasticsearch.plugins.ScriptPlugin; import org.elasticsearch.plugins.scanners.PluginInfo; import org.elasticsearch.plugins.spi.BarPlugin; import org.elasticsearch.plugins.spi.BarTestService; @@ -318,18 +313,18 @@ public void testNonExtensibleDep() throws Exception { public void testPassingMandatoryPluginCheck() { PluginsService.checkMandatoryPlugins( - Set.of("org.elasticsearch.plugins.loading.PluginsServiceTests$FakePlugin"), - Set.of("org.elasticsearch.plugins.loading.PluginsServiceTests$FakePlugin") + Set.of("org.elasticsearch.plugins.PluginsServiceTests$FakePlugin"), + Set.of("org.elasticsearch.plugins.PluginsServiceTests$FakePlugin") ); } public void testFailingMandatoryPluginCheck() { IllegalStateException e = expectThrows( IllegalStateException.class, - () -> PluginsService.checkMandatoryPlugins(Set.of(), Set.of("org.elasticsearch.plugins.loading.PluginsServiceTests$FakePlugin")) + () -> PluginsService.checkMandatoryPlugins(Set.of(), Set.of("org.elasticsearch.plugins.PluginsServiceTests$FakePlugin")) ); assertEquals( - "missing mandatory plugins [org.elasticsearch.plugins.loading.PluginsServiceTests$FakePlugin], found plugins []", + "missing mandatory plugins [org.elasticsearch.plugins.PluginsServiceTests$FakePlugin], found plugins []", e.getMessage() ); } diff --git a/server/src/test/java/org/elasticsearch/plugins/loading/PluginsUtilsTests.java b/server/src/test/java/org/elasticsearch/plugins/PluginsUtilsTests.java similarity index 99% rename from server/src/test/java/org/elasticsearch/plugins/loading/PluginsUtilsTests.java rename to server/src/test/java/org/elasticsearch/plugins/PluginsUtilsTests.java index f44de9ac1c704..a7cc74582afdc 100644 --- a/server/src/test/java/org/elasticsearch/plugins/loading/PluginsUtilsTests.java +++ b/server/src/test/java/org/elasticsearch/plugins/PluginsUtilsTests.java @@ -6,7 +6,7 @@ * Side Public License, v 1. 
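
The pair of tests above pins down the contract of PluginsService.checkMandatoryPlugins: every name in the mandatory set must appear among the found plugins, and the failure message lists the missing and found sets. A behavior-level sketch of that check (assumed shape for illustration, not the production implementation):

    import java.util.Set;
    import java.util.TreeSet;

    public final class MandatoryPluginsSketch {
        // Throw if any mandatory plugin is absent from the found set,
        // mirroring the message asserted in testFailingMandatoryPluginCheck.
        static void checkMandatoryPlugins(Set<String> found, Set<String> mandatory) {
            Set<String> missing = new TreeSet<>(mandatory);
            missing.removeAll(found);
            if (missing.isEmpty() == false) {
                throw new IllegalStateException("missing mandatory plugins " + missing + ", found plugins " + new TreeSet<>(found));
            }
        }

        public static void main(String[] args) {
            checkMandatoryPlugins(Set.of("a"), Set.of("a")); // passes silently
            checkMandatoryPlugins(Set.of(), Set.of("a"));    // throws IllegalStateException
        }
    }
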
*/ -package org.elasticsearch.plugins.loading; +package org.elasticsearch.plugins; import org.apache.logging.log4j.Level; import org.apache.lucene.tests.util.LuceneTestCase; @@ -14,7 +14,6 @@ import org.elasticsearch.Version; import org.elasticsearch.core.PathUtils; import org.elasticsearch.jdk.JarHell; -import org.elasticsearch.plugins.PluginDescriptor; import org.elasticsearch.test.ESTestCase; import org.hamcrest.Matchers; diff --git a/server/src/test/java/org/elasticsearch/plugins/loading/UberModuleClassLoaderTests.java b/server/src/test/java/org/elasticsearch/plugins/UberModuleClassLoaderTests.java similarity index 99% rename from server/src/test/java/org/elasticsearch/plugins/loading/UberModuleClassLoaderTests.java rename to server/src/test/java/org/elasticsearch/plugins/UberModuleClassLoaderTests.java index 6a12009acce41..e3cd11c8f3b68 100644 --- a/server/src/test/java/org/elasticsearch/plugins/loading/UberModuleClassLoaderTests.java +++ b/server/src/test/java/org/elasticsearch/plugins/UberModuleClassLoaderTests.java @@ -6,7 +6,7 @@ * Side Public License, v 1. */ -package org.elasticsearch.plugins.loading; +package org.elasticsearch.plugins; import org.elasticsearch.common.Strings; import org.elasticsearch.test.ESTestCase; diff --git a/server/src/test/java/org/elasticsearch/plugins/internal/ReloadAwarePluginTests.java b/server/src/test/java/org/elasticsearch/plugins/internal/ReloadAwarePluginTests.java index 4553a65944d30..2401ad25193a2 100644 --- a/server/src/test/java/org/elasticsearch/plugins/internal/ReloadAwarePluginTests.java +++ b/server/src/test/java/org/elasticsearch/plugins/internal/ReloadAwarePluginTests.java @@ -11,8 +11,8 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.node.MockNode; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.plugins.ReloadablePlugin; -import org.elasticsearch.plugins.loading.PluginsService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.transport.netty4.Netty4Plugin; diff --git a/server/src/test/java/org/elasticsearch/plugins/scanners/StablePluginsRegistryTests.java b/server/src/test/java/org/elasticsearch/plugins/scanners/StablePluginsRegistryTests.java index b37da49034f78..276cbdbc1a452 100644 --- a/server/src/test/java/org/elasticsearch/plugins/scanners/StablePluginsRegistryTests.java +++ b/server/src/test/java/org/elasticsearch/plugins/scanners/StablePluginsRegistryTests.java @@ -8,7 +8,7 @@ package org.elasticsearch.plugins.scanners; -import org.elasticsearch.plugins.loading.PluginBundle; +import org.elasticsearch.plugins.PluginBundle; import org.elasticsearch.test.ESTestCase; import org.mockito.Mockito; diff --git a/server/src/test/java/org/elasticsearch/search/TelemetryMetrics/SearchTookTimeTelemetryTests.java b/server/src/test/java/org/elasticsearch/search/TelemetryMetrics/SearchTookTimeTelemetryTests.java index 5dad16c5be397..850af7f85f76a 100644 --- a/server/src/test/java/org/elasticsearch/search/TelemetryMetrics/SearchTookTimeTelemetryTests.java +++ b/server/src/test/java/org/elasticsearch/search/TelemetryMetrics/SearchTookTimeTelemetryTests.java @@ -12,7 +12,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.loading.PluginsService; +import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.telemetry.Measurement; import org.elasticsearch.telemetry.TestTelemetryPlugin; import 
diff --git a/server/src/test/java/org/elasticsearch/search/TelemetryMetrics/SearchTransportTelemetryTests.java b/server/src/test/java/org/elasticsearch/search/TelemetryMetrics/SearchTransportTelemetryTests.java
index 5edb7147ea259..c7fc11e81483f 100644
--- a/server/src/test/java/org/elasticsearch/search/TelemetryMetrics/SearchTransportTelemetryTests.java
+++ b/server/src/test/java/org/elasticsearch/search/TelemetryMetrics/SearchTransportTelemetryTests.java
@@ -13,7 +13,7 @@
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.plugins.Plugin;
-import org.elasticsearch.plugins.loading.PluginsService;
+import org.elasticsearch.plugins.PluginsService;
 import org.elasticsearch.telemetry.Measurement;
 import org.elasticsearch.telemetry.TestTelemetryPlugin;
 import org.elasticsearch.test.ESSingleNodeTestCase;
diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java
index 3100bdd6db529..f4aa44f143c40 100644
--- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java
+++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java
@@ -162,8 +162,8 @@
 import org.elasticsearch.ingest.IngestService;
 import org.elasticsearch.monitor.StatusInfo;
 import org.elasticsearch.node.ResponseCollectorService;
+import org.elasticsearch.plugins.PluginsService;
 import org.elasticsearch.plugins.internal.DocumentParsingProvider;
-import org.elasticsearch.plugins.loading.PluginsService;
 import org.elasticsearch.plugins.scanners.StablePluginsRegistry;
 import org.elasticsearch.repositories.RepositoriesService;
 import org.elasticsearch.repositories.Repository;
diff --git a/server/src/test/resources/META-INF/services/org.elasticsearch.plugins.loading.PluginsServiceTests$TestExtensionPoint b/server/src/test/resources/META-INF/services/org.elasticsearch.plugins.PluginsServiceTests$TestExtensionPoint
similarity index 71%
rename from server/src/test/resources/META-INF/services/org.elasticsearch.plugins.loading.PluginsServiceTests$TestExtensionPoint
rename to server/src/test/resources/META-INF/services/org.elasticsearch.plugins.PluginsServiceTests$TestExtensionPoint
index 20654d4496c23..65ca6fbcac9a5 100644
--- a/server/src/test/resources/META-INF/services/org.elasticsearch.plugins.loading.PluginsServiceTests$TestExtensionPoint
+++ b/server/src/test/resources/META-INF/services/org.elasticsearch.plugins.PluginsServiceTests$TestExtensionPoint
@@ -6,5 +6,5 @@
 # Side Public License, v 1.
 #
-org.elasticsearch.plugins.loading.PluginsServiceTests$TestExtension1
-org.elasticsearch.plugins.loading.PluginsServiceTests$TestExtension2
+org.elasticsearch.plugins.PluginsServiceTests$TestExtension1
+org.elasticsearch.plugins.PluginsServiceTests$TestExtension2
 
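The resource renamed above is a plain Java SPI descriptor: the file's own name is the fully qualified extension-point type and each non-comment line names an implementation class, which is why both the path and the entries have to track the package move. A self-contained sketch of how the JDK consumes such a descriptor follows; TestExtensionPoint is a hypothetical stand-in for the nested test interface, and Elasticsearch's own reader, SPIClassIterator, appears in the MockPluginsService imports further down.

    import java.util.ServiceLoader;

    public class SpiLookupDemo {
        // Hypothetical extension point, standing in for PluginsServiceTests$TestExtensionPoint.
        public interface TestExtensionPoint {}

        public static void main(String[] args) {
            // ServiceLoader reads META-INF/services/<binary name of TestExtensionPoint>
            // from the classpath and instantiates every implementation listed there.
            for (TestExtensionPoint extension : ServiceLoader.load(TestExtensionPoint.class)) {
                System.out.println("loaded extension: " + extension.getClass().getName());
            }
        }
    }

Note that nested types use their binary name (with the $ separator) in the descriptor file name, exactly as in the renamed resource.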
diff --git a/server/src/test/resources/org/elasticsearch/plugins/loading/dummy-plugin.jar b/server/src/test/resources/org/elasticsearch/plugins/dummy-plugin.jar
similarity index 100%
rename from server/src/test/resources/org/elasticsearch/plugins/loading/dummy-plugin.jar
rename to server/src/test/resources/org/elasticsearch/plugins/dummy-plugin.jar
diff --git a/server/src/test/resources/org/elasticsearch/plugins/loading/non-extensible-plugin.jar b/server/src/test/resources/org/elasticsearch/plugins/non-extensible-plugin.jar
similarity index 100%
rename from server/src/test/resources/org/elasticsearch/plugins/loading/non-extensible-plugin.jar
rename to server/src/test/resources/org/elasticsearch/plugins/non-extensible-plugin.jar
diff --git a/test/framework/src/main/java/org/elasticsearch/action/support/CancellableActionTestPlugin.java b/test/framework/src/main/java/org/elasticsearch/action/support/CancellableActionTestPlugin.java
index d46e95aaef328..dad0e3b613efb 100644
--- a/test/framework/src/main/java/org/elasticsearch/action/support/CancellableActionTestPlugin.java
+++ b/test/framework/src/main/java/org/elasticsearch/action/support/CancellableActionTestPlugin.java
@@ -14,7 +14,7 @@
 import org.elasticsearch.core.Releasable;
 import org.elasticsearch.plugins.ActionPlugin;
 import org.elasticsearch.plugins.Plugin;
-import org.elasticsearch.plugins.loading.PluginsService;
+import org.elasticsearch.plugins.PluginsService;
 import org.elasticsearch.tasks.CancellableTask;
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.tasks.TaskCancelledException;
diff --git a/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java b/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java
index 1b3ef22db7a86..8ef80c08517de 100644
--- a/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java
+++ b/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java
@@ -215,7 +215,7 @@ public boolean implies(ProtectionDomain domain, Permission permission) {
     static Map<String, URL> getCodebases() {
         Map<String, URL> codebases = PolicyUtil.getCodebaseJarMap(JarHell.parseClassPath());
         // when testing server, the main elasticsearch code is not yet in a jar, so we need to manually add it
-        addClassCodebase(codebases, "elasticsearch", "org.elasticsearch.plugins.loading.PluginsService");
+        addClassCodebase(codebases, "elasticsearch", "org.elasticsearch.plugins.PluginsService");
         addClassCodebase(codebases, "elasticsearch-plugin-classloader", "org.elasticsearch.plugins.loader.ExtendedPluginsClassLoader");
         addClassCodebase(codebases, "elasticsearch-nio", "org.elasticsearch.nio.ChannelFactory");
         addClassCodebase(codebases, "elasticsearch-secure-sm", "org.elasticsearch.secure_sm.SecureSM");
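In the BootstrapForTesting hunk above, the "elasticsearch" codebase is keyed off a class guaranteed to live in the server sources, so the class name has to follow the package move. The lookup amounts to asking a class for its code source location; here is a small sketch using only standard java.security APIs (addClassCodebase itself is internal to the test bootstrap and its body is not shown in this patch).

    import java.net.URL;
    import java.security.CodeSource;

    public class CodebaseLookupDemo {
        // Returns the jar or directory a class was loaded from, or null for
        // JDK classes, which have no CodeSource.
        static URL codebaseOf(Class<?> clazz) {
            CodeSource source = clazz.getProtectionDomain().getCodeSource();
            return source == null ? null : source.getLocation();
        }

        public static void main(String[] args) {
            // For a class compiled onto the test classpath this prints a directory
            // URL; for a packaged class it prints the jar location.
            System.out.println(codebaseOf(CodebaseLookupDemo.class));
        }
    }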
diff --git a/test/framework/src/main/java/org/elasticsearch/node/MockNode.java b/test/framework/src/main/java/org/elasticsearch/node/MockNode.java
index a348be75d0449..520aff77497ba 100644
--- a/test/framework/src/main/java/org/elasticsearch/node/MockNode.java
+++ b/test/framework/src/main/java/org/elasticsearch/node/MockNode.java
@@ -28,9 +28,9 @@
 import org.elasticsearch.indices.IndicesService;
 import org.elasticsearch.indices.breaker.CircuitBreakerService;
 import org.elasticsearch.indices.recovery.RecoverySettings;
+import org.elasticsearch.plugins.MockPluginsService;
 import org.elasticsearch.plugins.Plugin;
-import org.elasticsearch.plugins.loading.MockPluginsService;
-import org.elasticsearch.plugins.loading.PluginsService;
+import org.elasticsearch.plugins.PluginsService;
 import org.elasticsearch.readiness.MockReadinessService;
 import org.elasticsearch.readiness.ReadinessService;
 import org.elasticsearch.script.MockScriptService;
diff --git a/test/framework/src/main/java/org/elasticsearch/plugins/loading/MockPluginsService.java b/test/framework/src/main/java/org/elasticsearch/plugins/MockPluginsService.java
similarity index 97%
rename from test/framework/src/main/java/org/elasticsearch/plugins/loading/MockPluginsService.java
rename to test/framework/src/main/java/org/elasticsearch/plugins/MockPluginsService.java
index a393953ca2f99..9d8e7dedcc06d 100644
--- a/test/framework/src/main/java/org/elasticsearch/plugins/loading/MockPluginsService.java
+++ b/test/framework/src/main/java/org/elasticsearch/plugins/MockPluginsService.java
@@ -6,7 +6,7 @@
  * Side Public License, v 1.
 */
-package org.elasticsearch.plugins.loading;
+package org.elasticsearch.plugins;
 
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
@@ -16,9 +16,6 @@
 import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.jdk.ModuleQualifiedExportsService;
-import org.elasticsearch.plugins.Plugin;
-import org.elasticsearch.plugins.PluginDescriptor;
-import org.elasticsearch.plugins.PluginRuntimeInfo;
 import org.elasticsearch.plugins.spi.SPIClassIterator;
 
 import java.lang.reflect.Constructor;
diff --git a/test/framework/src/main/java/org/elasticsearch/plugins/loading/PluginTestUtil.java b/test/framework/src/main/java/org/elasticsearch/plugins/PluginTestUtil.java
similarity index 96%
rename from test/framework/src/main/java/org/elasticsearch/plugins/loading/PluginTestUtil.java
rename to test/framework/src/main/java/org/elasticsearch/plugins/PluginTestUtil.java
index 4418208a10272..7edb46d3e3786 100644
--- a/test/framework/src/main/java/org/elasticsearch/plugins/loading/PluginTestUtil.java
+++ b/test/framework/src/main/java/org/elasticsearch/plugins/PluginTestUtil.java
@@ -6,10 +6,9 @@
  * Side Public License, v 1.
 */
-package org.elasticsearch.plugins.loading;
+package org.elasticsearch.plugins;
 
 import org.elasticsearch.Version;
-import org.elasticsearch.plugins.PluginDescriptor;
 
 import java.io.IOException;
 import java.io.OutputStream;
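With this change MockNode imports MockPluginsService and PluginsService from the same package, as the hunks above show. The substitution itself follows the familiar override-a-factory-hook pattern; the sketch below is deliberately schematic, with hypothetical names throughout, since the real Node and MockNode signatures are not part of this patch.

    // All names here are illustrative stand-ins, not the actual Elasticsearch types.
    class NodeSketch {
        // A production node creates the real plugin-loading service.
        protected PluginsServiceSketch newPluginsService() {
            return new PluginsServiceSketch();
        }
    }

    class PluginsServiceSketch {}

    class MockPluginsServiceSketch extends PluginsServiceSketch {}

    class MockNodeSketch extends NodeSketch {
        @Override
        protected PluginsServiceSketch newPluginsService() {
            // The test node swaps in the mock implementation through the same hook.
            return new MockPluginsServiceSketch();
        }
    }

This pattern only works when mock and production service share a package or the hook is visible to the subclass, which is one practical consequence of keeping MockPluginsService next to PluginsService.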
diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java
index 42332500a83a8..271df2a971fb1 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java
@@ -65,11 +65,11 @@
 import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
 import org.elasticsearch.node.InternalSettingsPreparer;
 import org.elasticsearch.plugins.MapperPlugin;
+import org.elasticsearch.plugins.MockPluginsService;
 import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.plugins.PluginsService;
 import org.elasticsearch.plugins.ScriptPlugin;
 import org.elasticsearch.plugins.SearchPlugin;
-import org.elasticsearch.plugins.loading.MockPluginsService;
-import org.elasticsearch.plugins.loading.PluginsService;
 import org.elasticsearch.plugins.scanners.StablePluginsRegistry;
 import org.elasticsearch.script.MockScriptEngine;
 import org.elasticsearch.script.MockScriptService;
diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractSearchCancellationTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractSearchCancellationTestCase.java
index c4683bacc75c6..5dc707e94bdd7 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/AbstractSearchCancellationTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractSearchCancellationTestCase.java
@@ -21,7 +21,7 @@
 import org.elasticsearch.index.IndexModule;
 import org.elasticsearch.index.shard.SearchOperationListener;
 import org.elasticsearch.plugins.Plugin;
-import org.elasticsearch.plugins.loading.PluginsService;
+import org.elasticsearch.plugins.PluginsService;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.script.MockScriptPlugin;
 import org.elasticsearch.search.SearchService;
diff --git a/test/framework/src/test/java/org/elasticsearch/plugins/MockPluginsServiceTests.java b/test/framework/src/test/java/org/elasticsearch/plugins/MockPluginsServiceTests.java
index b8f8cecd3615d..055e56db3fb33 100644
--- a/test/framework/src/test/java/org/elasticsearch/plugins/MockPluginsServiceTests.java
+++ b/test/framework/src/test/java/org/elasticsearch/plugins/MockPluginsServiceTests.java
@@ -12,7 +12,6 @@
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.env.TestEnvironment;
-import org.elasticsearch.plugins.loading.MockPluginsService;
 import org.elasticsearch.test.ESTestCase;
 import org.junit.Before;
 
diff --git a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/action/GetAutoscalingCapacityRestCancellationIT.java b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/action/GetAutoscalingCapacityRestCancellationIT.java
index e14ebcd0930be..5b2803c8f4186 100644
--- a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/action/GetAutoscalingCapacityRestCancellationIT.java
+++ b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/action/GetAutoscalingCapacityRestCancellationIT.java
@@ -16,7 +16,7 @@
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.support.XContentMapValues;
 import org.elasticsearch.plugins.Plugin;
-import org.elasticsearch.plugins.loading.PluginsService;
+import org.elasticsearch.plugins.PluginsService;
 import org.elasticsearch.tasks.CancellableTask;
 import org.elasticsearch.tasks.TaskManager;
 import org.elasticsearch.test.ESIntegTestCase;
diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/TransportNodeDeprecationCheckAction.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/TransportNodeDeprecationCheckAction.java
index 19b306868738a..ba72be655a7ff 100644
--- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/TransportNodeDeprecationCheckAction.java
+++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/TransportNodeDeprecationCheckAction.java
@@ -26,7 +26,7 @@
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.license.XPackLicenseState;
-import org.elasticsearch.plugins.loading.PluginsService;
+import org.elasticsearch.plugins.PluginsService;
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
diff --git a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/TransportNodeDeprecationCheckActionTests.java b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/TransportNodeDeprecationCheckActionTests.java
index a153b40f730f0..80692efb7474a 100644
--- a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/TransportNodeDeprecationCheckActionTests.java
+++ b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/TransportNodeDeprecationCheckActionTests.java
@@ -20,7 +20,7 @@
 import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.license.XPackLicenseState;
-import org.elasticsearch.plugins.loading.PluginsService;
+import org.elasticsearch.plugins.PluginsService;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.threadpool.TestThreadPool;
 import org.elasticsearch.threadpool.ThreadPool;
diff --git a/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java b/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java
index 44367b7de51ea..80bb0368a1afc 100644
--- a/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java
+++ b/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java
@@ -57,7 +57,7 @@
 import org.elasticsearch.indices.IndicesService;
 import org.elasticsearch.persistent.PersistentTasksService;
 import org.elasticsearch.plugins.Plugin;
-import org.elasticsearch.plugins.loading.PluginsService;
+import org.elasticsearch.plugins.PluginsService;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.search.SearchHit;
 import org.elasticsearch.search.SearchResponseUtils;
diff --git a/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/AbstractEqlBlockingIntegTestCase.java b/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/AbstractEqlBlockingIntegTestCase.java
index efa19266a38fd..414705aff0b79 100644
--- a/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/AbstractEqlBlockingIntegTestCase.java
+++ b/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/AbstractEqlBlockingIntegTestCase.java
@@ -22,7 +22,7 @@
 import org.elasticsearch.index.shard.SearchOperationListener;
 import org.elasticsearch.plugins.ActionPlugin;
 import org.elasticsearch.plugins.Plugin;
-import org.elasticsearch.plugins.loading.PluginsService;
+import org.elasticsearch.plugins.PluginsService;
 import org.elasticsearch.search.internal.ReaderContext;
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.tasks.TaskId;
diff --git a/x-pack/plugin/monitoring/src/internalClusterTest/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterIT.java b/x-pack/plugin/monitoring/src/internalClusterTest/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterIT.java
index ef4f179bb93a2..5250a1f764e5c 100644
--- a/x-pack/plugin/monitoring/src/internalClusterTest/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterIT.java
+++ b/x-pack/plugin/monitoring/src/internalClusterTest/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterIT.java
@@ -29,7 +29,7 @@
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.env.TestEnvironment;
 import org.elasticsearch.license.TestUtils;
-import org.elasticsearch.plugins.loading.PluginsService;
+import org.elasticsearch.plugins.PluginsService;
 import org.elasticsearch.rest.RestUtils;
 import org.elasticsearch.test.ESIntegTestCase;
 import org.elasticsearch.test.ESIntegTestCase.Scope;
diff --git a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/action/CancellationIT.java b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/action/CancellationIT.java
index dc71c8fd46f79..183ef3786a62d 100644
--- a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/action/CancellationIT.java
+++ b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/action/CancellationIT.java
@@ -19,7 +19,7 @@
 import org.elasticsearch.client.Request;
 import org.elasticsearch.client.Response;
 import org.elasticsearch.plugins.Plugin;
-import org.elasticsearch.plugins.loading.PluginsService;
+import org.elasticsearch.plugins.PluginsService;
 import org.elasticsearch.script.MockScriptPlugin;
 import org.elasticsearch.search.lookup.LeafStoredFieldsLookup;
 import org.elasticsearch.tasks.CancellableTask;
diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/full/SearchableSnapshotsPrewarmingIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/full/SearchableSnapshotsPrewarmingIntegTests.java
index ece12dd4d1167..42542b63c80d1 100644
--- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/full/SearchableSnapshotsPrewarmingIntegTests.java
+++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/full/SearchableSnapshotsPrewarmingIntegTests.java
@@ -38,8 +38,8 @@
 import org.elasticsearch.indices.recovery.RecoveryState;
 import org.elasticsearch.license.LicenseSettings;
 import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.plugins.PluginsService;
 import org.elasticsearch.plugins.RepositoryPlugin;
-import org.elasticsearch.plugins.loading.PluginsService;
 import org.elasticsearch.repositories.IndexId;
 import org.elasticsearch.repositories.RepositoriesMetrics;
 import org.elasticsearch.repositories.RepositoriesService;
diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmSingleNodeTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmSingleNodeTests.java
index 9aa0201d6b4cc..2ced54a513146 100644
--- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmSingleNodeTests.java
+++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmSingleNodeTests.java
@@ -31,7 +31,7 @@
 import org.elasticsearch.core.Strings;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.plugins.Plugin;
-import org.elasticsearch.plugins.loading.PluginsService;
+import org.elasticsearch.plugins.PluginsService;
 import org.elasticsearch.test.SecuritySettingsSource;
 import org.elasticsearch.test.SecuritySingleNodeTestCase;
 import org.elasticsearch.test.junit.annotations.TestLogging;
diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/ProfileCancellationIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/ProfileCancellationIntegTests.java
index 5e004b9c048e5..87a5146113f72 100644
--- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/ProfileCancellationIntegTests.java
+++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/ProfileCancellationIntegTests.java
@@ -24,7 +24,7 @@
 import org.elasticsearch.index.shard.SearchOperationListener;
 import org.elasticsearch.plugins.ActionPlugin;
 import org.elasticsearch.plugins.Plugin;
-import org.elasticsearch.plugins.loading.PluginsService;
+import org.elasticsearch.plugins.PluginsService;
 import org.elasticsearch.search.internal.ReaderContext;
 import org.elasticsearch.search.internal.SearchContext;
 import org.elasticsearch.tasks.CancellableTask;
diff --git a/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/AbstractSqlBlockingIntegTestCase.java b/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/AbstractSqlBlockingIntegTestCase.java
index a562c379b301d..7ad54901e2d06 100644
--- a/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/AbstractSqlBlockingIntegTestCase.java
+++ b/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/AbstractSqlBlockingIntegTestCase.java
@@ -22,7 +22,7 @@
 import org.elasticsearch.license.LicenseSettings;
 import org.elasticsearch.plugins.ActionPlugin;
 import org.elasticsearch.plugins.Plugin;
-import org.elasticsearch.plugins.loading.PluginsService;
+import org.elasticsearch.plugins.PluginsService;
 import org.elasticsearch.search.internal.ReaderContext;
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.tasks.TaskId;
diff --git a/x-pack/plugin/write-load-forecaster/src/internalClusterTest/java/org/elasticsearch/xpack/writeloadforecaster/WriteLoadForecasterIT.java b/x-pack/plugin/write-load-forecaster/src/internalClusterTest/java/org/elasticsearch/xpack/writeloadforecaster/WriteLoadForecasterIT.java
index 4b6e31ae7105f..cb93725b320d1 100644
--- a/x-pack/plugin/write-load-forecaster/src/internalClusterTest/java/org/elasticsearch/xpack/writeloadforecaster/WriteLoadForecasterIT.java
+++ b/x-pack/plugin/write-load-forecaster/src/internalClusterTest/java/org/elasticsearch/xpack/writeloadforecaster/WriteLoadForecasterIT.java
@@ -32,7 +32,7 @@
 import org.elasticsearch.index.mapper.DateFieldMapper;
 import org.elasticsearch.index.shard.IndexingStats;
 import org.elasticsearch.plugins.Plugin;
-import org.elasticsearch.plugins.loading.PluginsService;
+import org.elasticsearch.plugins.PluginsService;
 import org.elasticsearch.test.ESIntegTestCase;
 import org.elasticsearch.xcontent.XContentType;
 import org.junit.Before;
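The remaining call sites in this patch are tests whose only change is the import line: how they obtain PluginsService from a running node is untouched. A sketch of that common lookup, assuming the usual internalCluster().getInstance helper from the test framework behaves as in mainline:

    import org.elasticsearch.plugins.PluginsService;
    import org.elasticsearch.test.ESIntegTestCase;

    public class PluginsServiceLookupIT extends ESIntegTestCase {
        public void testPluginsServiceIsAvailable() {
            // Resolves the node-level service from the internal test cluster; after
            // this patch only the import above differs from the pre-rename code.
            PluginsService pluginsService = internalCluster().getInstance(PluginsService.class);
            assertNotNull(pluginsService);
        }
    }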