From fdb1b2bf796fc26d85c98dc3e2c1913f76e35c8d Mon Sep 17 00:00:00 2001
From: Jim Ferenczi
Date: Thu, 5 Dec 2024 14:20:31 +0000
Subject: [PATCH 01/26] Add a new `offset_source` field to store offsets
referencing substrings of another field. (#118017)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
This field is primarily designed for use with the `semantic_text` field, where it enables storing offsets that point to substrings of the field used to generate its underlying chunks.
To discourage external usage, the field is intentionally left out of the reference documentation; detailed javadocs describe its specific purpose and limitations.
I couldn’t find a way to fully block external usage, but keeping it out of the docs should keep it mostly out of sight for now.
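For reference, each document can hold at most one value for this field: an object of the form `{"field": <source field name>, "start": <int>, "end": <int>}`, with the offsets expressed in UTF-16 code units (the only charset the mapper currently supports).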
---
.../xpack/inference/InferencePlugin.java | 8 +-
.../inference/mapper/OffsetSourceField.java | 145 ++++++++++
.../mapper/OffsetSourceFieldMapper.java | 253 ++++++++++++++++++
.../mapper/OffsetSourceFieldMapperTests.java | 216 +++++++++++++++
.../mapper/OffsetSourceFieldTests.java | 72 +++++
.../mapper/OffsetSourceFieldTypeTests.java | 44 +++
6 files changed, 737 insertions(+), 1 deletion(-)
create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/OffsetSourceField.java
create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/OffsetSourceFieldMapper.java
create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/OffsetSourceFieldMapperTests.java
create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/OffsetSourceFieldTests.java
create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/OffsetSourceFieldTypeTests.java
diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java
index 48458bf4f508..3c14e51a3c2d 100644
--- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java
+++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java
@@ -68,6 +68,7 @@
import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender;
import org.elasticsearch.xpack.inference.external.http.sender.RequestExecutorServiceSettings;
import org.elasticsearch.xpack.inference.logging.ThrottlerManager;
+import org.elasticsearch.xpack.inference.mapper.OffsetSourceFieldMapper;
import org.elasticsearch.xpack.inference.mapper.SemanticTextFieldMapper;
import org.elasticsearch.xpack.inference.queries.SemanticQueryBuilder;
import org.elasticsearch.xpack.inference.rank.random.RandomRankBuilder;
@@ -392,7 +393,12 @@ public void close() {
@Override
 public Map<String, Mapper.TypeParser> getMappers() {
- return Map.of(SemanticTextFieldMapper.CONTENT_TYPE, SemanticTextFieldMapper.PARSER);
+ return Map.of(
+ SemanticTextFieldMapper.CONTENT_TYPE,
+ SemanticTextFieldMapper.PARSER,
+ OffsetSourceFieldMapper.CONTENT_TYPE,
+ OffsetSourceFieldMapper.PARSER
+ );
}
@Override
diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/OffsetSourceField.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/OffsetSourceField.java
new file mode 100644
index 000000000000..d8339f1004da
--- /dev/null
+++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/OffsetSourceField.java
@@ -0,0 +1,145 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.inference.mapper;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.index.IndexOptions;
+import org.apache.lucene.index.PostingsEnum;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.search.DocIdSetIterator;
+
+import java.io.IOException;
+import java.nio.charset.Charset;
+import java.util.LinkedHashMap;
+import java.util.Map;
+
+/**
+ * Represents a {@link Field} that stores a {@link Term} along with its start and end offsets.
+ * Note: The {@link Charset} used to calculate these offsets is not associated with this field.
+ * It is the responsibility of the consumer to handle the appropriate {@link Charset}.
+ */
+public final class OffsetSourceField extends Field {
+ private static final FieldType FIELD_TYPE = new FieldType();
+
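+ // The source field name is indexed as a single, untokenized term whose postings carry the start/end
+ // offsets, so they can be read back at fetch time via OffsetSourceLoader below.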
+ static {
+ FIELD_TYPE.setTokenized(false);
+ FIELD_TYPE.setOmitNorms(true);
+ FIELD_TYPE.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ }
+
+ private int startOffset;
+ private int endOffset;
+
+ public OffsetSourceField(String fieldName, String sourceFieldName, int startOffset, int endOffset) {
+ super(fieldName, sourceFieldName, FIELD_TYPE);
+ this.startOffset = startOffset;
+ this.endOffset = endOffset;
+ }
+
+ public void setValues(String fieldName, int startOffset, int endOffset) {
+ this.fieldsData = fieldName;
+ this.startOffset = startOffset;
+ this.endOffset = endOffset;
+ }
+
+ @Override
+ public TokenStream tokenStream(Analyzer analyzer, TokenStream reuse) {
+ OffsetTokenStream stream;
+ if (reuse instanceof OffsetTokenStream) {
+ stream = (OffsetTokenStream) reuse;
+ } else {
+ stream = new OffsetTokenStream();
+ }
+
+ stream.setValues((String) fieldsData, startOffset, endOffset);
+ return stream;
+ }
+
+ public static OffsetSourceLoader loader(Terms terms) throws IOException {
+ return new OffsetSourceLoader(terms);
+ }
+
+ private static final class OffsetTokenStream extends TokenStream {
+ private final CharTermAttribute termAttribute = addAttribute(CharTermAttribute.class);
+ private final OffsetAttribute offsetAttribute = addAttribute(OffsetAttribute.class);
+ private boolean used = true;
+ private String term = null;
+ private int startOffset = 0;
+ private int endOffset = 0;
+
+ private OffsetTokenStream() {}
+
+ /** Sets the values */
+ void setValues(String term, int startOffset, int endOffset) {
+ this.term = term;
+ this.startOffset = startOffset;
+ this.endOffset = endOffset;
+ }
+
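+ // Emits exactly one token per reset(): the term is the source field name and the token offsets are the stored start/end.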
+ @Override
+ public boolean incrementToken() {
+ if (used) {
+ return false;
+ }
+ clearAttributes();
+ termAttribute.append(term);
+ offsetAttribute.setOffset(startOffset, endOffset);
+ used = true;
+ return true;
+ }
+
+ @Override
+ public void reset() {
+ used = false;
+ }
+
+ @Override
+ public void close() {
+ term = null;
+ }
+ }
+
+ public static class OffsetSourceLoader {
+ private final Map<String, PostingsEnum> postingsEnums = new LinkedHashMap<>();
+
+ private OffsetSourceLoader(Terms terms) throws IOException {
+ var termsEnum = terms.iterator();
+ while (termsEnum.next() != null) {
+ var postings = termsEnum.postings(null, PostingsEnum.OFFSETS);
+ if (postings.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
+ postingsEnums.put(termsEnum.term().utf8ToString(), postings);
+ }
+ }
+ }
+
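+ /**
+ * Returns the offsets recorded for {@code doc}, or {@code null} if the document has no value.
+ * Callers must visit documents in increasing doc id order, since the underlying postings are only advanced forward.
+ */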
+ public OffsetSourceFieldMapper.OffsetSource advanceTo(int doc) throws IOException {
+ for (var it = postingsEnums.entrySet().iterator(); it.hasNext();) {
+ var entry = it.next();
+ var postings = entry.getValue();
+ if (postings.docID() < doc) {
+ if (postings.advance(doc) == DocIdSetIterator.NO_MORE_DOCS) {
+ it.remove();
+ continue;
+ }
+ }
+ if (postings.docID() == doc) {
+ assert postings.freq() == 1;
+ postings.nextPosition();
+ return new OffsetSourceFieldMapper.OffsetSource(entry.getKey(), postings.startOffset(), postings.endOffset());
+ }
+ }
+ return null;
+ }
+ }
+}
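For context, indexing an entry with this field class boils down to something like the following sketch (the field names "chunks.offsets" and "body" are made up for illustration and are not part of this patch):

    import org.apache.lucene.document.Document;

    Document doc = new Document();
    // indexes the term "body" under "chunks.offsets", carrying offsets 0..128 in its postings
    doc.add(new OffsetSourceField("chunks.offsets", "body", 0, 128));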
diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/OffsetSourceFieldMapper.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/OffsetSourceFieldMapper.java
new file mode 100644
index 000000000000..e612076f1aaf
--- /dev/null
+++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/OffsetSourceFieldMapper.java
@@ -0,0 +1,253 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.inference.mapper;
+
+import org.apache.lucene.index.FieldInfos;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.search.Query;
+import org.elasticsearch.index.fielddata.FieldDataContext;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.mapper.DocumentParserContext;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.MappedFieldType;
+import org.elasticsearch.index.mapper.MapperBuilderContext;
+import org.elasticsearch.index.mapper.TextSearchInfo;
+import org.elasticsearch.index.mapper.ValueFetcher;
+import org.elasticsearch.index.query.SearchExecutionContext;
+import org.elasticsearch.search.fetch.StoredFieldsSpec;
+import org.elasticsearch.search.lookup.Source;
+import org.elasticsearch.xcontent.ConstructingObjectParser;
+import org.elasticsearch.xcontent.ParseField;
+import org.elasticsearch.xcontent.ToXContentObject;
+import org.elasticsearch.xcontent.XContentBuilder;
+import org.elasticsearch.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.nio.charset.Charset;
+import java.nio.charset.StandardCharsets;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg;
+
+/**
+ * A {@link FieldMapper} that maps a field name to its start and end offsets.
+ * The {@link CharsetFormat} used to compute the offsets is specified via the charset parameter.
+ * Currently, only {@link CharsetFormat#UTF_16} is supported, aligning with Java's {@code String} charset
+ * for simpler internal usage and integration.
+ *
+ * Each document can store at most one value in this field.
+ *
+ * Note: This mapper is not yet documented and is intended exclusively for internal use by
+ * {@link SemanticTextFieldMapper}. If exposing this mapper directly to users becomes necessary,
+ * extending charset compatibility should be considered, as the current default (and sole supported charset)
+ * was chosen for ease of Java integration.
+ */
+public class OffsetSourceFieldMapper extends FieldMapper {
+ public static final String CONTENT_TYPE = "offset_source";
+
+ private static final String SOURCE_NAME_FIELD = "field";
+ private static final String START_OFFSET_FIELD = "start";
+ private static final String END_OFFSET_FIELD = "end";
+
+ public record OffsetSource(String field, int start, int end) implements ToXContentObject {
+ public OffsetSource {
+ if (start < 0 || end < 0) {
+ throw new IllegalArgumentException("Illegal offsets, expected positive numbers, got: " + start + ":" + end);
+ }
+ if (start > end) {
+ throw new IllegalArgumentException("Illegal offsets, expected start < end, got: " + start + " > " + end);
+ }
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject();
+ builder.field(SOURCE_NAME_FIELD, field);
+ builder.field(START_OFFSET_FIELD, start);
+ builder.field(END_OFFSET_FIELD, end);
+ return builder.endObject();
+ }
+ }
+
+ private static final ConstructingObjectParser<OffsetSource, Void> OFFSET_SOURCE_PARSER = new ConstructingObjectParser<>(
+ CONTENT_TYPE,
+ true,
+ args -> new OffsetSource((String) args[0], (int) args[1], (int) args[2])
+ );
+
+ static {
+ OFFSET_SOURCE_PARSER.declareString(constructorArg(), new ParseField(SOURCE_NAME_FIELD));
+ OFFSET_SOURCE_PARSER.declareInt(constructorArg(), new ParseField(START_OFFSET_FIELD));
+ OFFSET_SOURCE_PARSER.declareInt(constructorArg(), new ParseField(END_OFFSET_FIELD));
+ }
+
+ public enum CharsetFormat {
+ UTF_16(StandardCharsets.UTF_16);
+
+ private Charset charSet;
+
+ CharsetFormat(Charset charSet) {
+ this.charSet = charSet;
+ }
+ }
+
+ public static class Builder extends FieldMapper.Builder {
+ private final Parameter<CharsetFormat> charset = Parameter.enumParam(
+ "charset",
+ false,
+ i -> CharsetFormat.UTF_16,
+ CharsetFormat.UTF_16,
+ CharsetFormat.class
+ );
+ private final Parameter
*/
- public abstract <T extends Bucket> Comparator<T> partiallyBuiltBucketComparator(ToLongFunction<T> ordinalReader, Aggregator aggregator);
+ public abstract <T extends Bucket> Comparator<BucketAndOrd<T>> partiallyBuiltBucketComparator(Aggregator aggregator);
/**
* Build a comparator for fully built buckets.
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/InternalOrder.java b/server/src/main/java/org/elasticsearch/search/aggregations/InternalOrder.java
index b2ca4a10dc4b..3593eb5adf7e 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/InternalOrder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/InternalOrder.java
@@ -15,6 +15,7 @@
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.search.aggregations.Aggregator.BucketComparator;
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket;
+import org.elasticsearch.search.aggregations.bucket.terms.BucketAndOrd;
import org.elasticsearch.search.aggregations.support.AggregationPath;
import org.elasticsearch.search.sort.SortOrder;
import org.elasticsearch.search.sort.SortValue;
@@ -30,7 +31,6 @@
import java.util.List;
import java.util.Objects;
import java.util.function.BiFunction;
-import java.util.function.ToLongFunction;
/**
* Implementations for {@link Bucket} ordering strategies.
@@ -63,10 +63,10 @@ public AggregationPath path() {
}
@Override
- public <T extends Bucket> Comparator<T> partiallyBuiltBucketComparator(ToLongFunction<T> ordinalReader, Aggregator aggregator) {
+ public <T extends Bucket> Comparator<BucketAndOrd<T>> partiallyBuiltBucketComparator(Aggregator aggregator) {
try {
BucketComparator bucketComparator = path.bucketComparator(aggregator, order);
- return (lhs, rhs) -> bucketComparator.compare(ordinalReader.applyAsLong(lhs), ordinalReader.applyAsLong(rhs));
+ return (lhs, rhs) -> bucketComparator.compare(lhs.ord, rhs.ord);
} catch (IllegalArgumentException e) {
throw new AggregationExecutionException.InvalidPath("Invalid aggregation order path [" + path + "]. " + e.getMessage(), e);
}
@@ -188,12 +188,13 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
}
@Override
- public <T extends Bucket> Comparator<T> partiallyBuiltBucketComparator(ToLongFunction<T> ordinalReader, Aggregator aggregator) {
- List<Comparator<T>> comparators = orderElements.stream()
- .map(oe -> oe.partiallyBuiltBucketComparator(ordinalReader, aggregator))
- .toList();
+ public <T extends Bucket> Comparator<BucketAndOrd<T>> partiallyBuiltBucketComparator(Aggregator aggregator) {
+ List<Comparator<BucketAndOrd<T>>> comparators = new ArrayList<>(orderElements.size());
+ for (BucketOrder order : orderElements) {
+ comparators.add(order.partiallyBuiltBucketComparator(aggregator));
+ }
return (lhs, rhs) -> {
- for (Comparator<T> c : comparators) {
+ for (Comparator<BucketAndOrd<T>> c : comparators) {
int result = c.compare(lhs, rhs);
if (result != 0) {
return result;
@@ -299,9 +300,9 @@ byte id() {
}
@Override
- public <T extends Bucket> Comparator<T> partiallyBuiltBucketComparator(ToLongFunction<T> ordinalReader, Aggregator aggregator) {
+ public <T extends Bucket> Comparator<BucketAndOrd<T>> partiallyBuiltBucketComparator(Aggregator aggregator) {
Comparator<Bucket> comparator = comparator();
- return comparator::compare;
+ return (lhs, rhs) -> comparator.compare(lhs.bucket, rhs.bucket);
}
@Override
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/countedterms/CountedTermsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/countedterms/CountedTermsAggregator.java
index 344b90b06c4f..571ce3a9a451 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/countedterms/CountedTermsAggregator.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/countedterms/CountedTermsAggregator.java
@@ -13,6 +13,7 @@
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.util.IntArray;
import org.elasticsearch.common.util.LongArray;
import org.elasticsearch.common.util.ObjectArray;
import org.elasticsearch.core.Releasables;
@@ -26,6 +27,7 @@
import org.elasticsearch.search.aggregations.InternalOrder;
import org.elasticsearch.search.aggregations.LeafBucketCollector;
import org.elasticsearch.search.aggregations.LeafBucketCollectorBase;
+import org.elasticsearch.search.aggregations.bucket.terms.BucketAndOrd;
import org.elasticsearch.search.aggregations.bucket.terms.BucketPriorityQueue;
import org.elasticsearch.search.aggregations.bucket.terms.BytesKeyedBucketOrds;
import org.elasticsearch.search.aggregations.bucket.terms.InternalTerms;
@@ -38,7 +40,6 @@
import java.util.Arrays;
import java.util.Map;
import java.util.function.BiConsumer;
-import java.util.function.Supplier;
import static java.util.Collections.emptyList;
import static org.elasticsearch.search.aggregations.InternalOrder.isKeyOrder;
@@ -115,51 +116,57 @@ public InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throw
LongArray otherDocCounts = bigArrays().newLongArray(owningBucketOrds.size());
ObjectArray<StringTerms.Bucket[]> topBucketsPerOrd = bigArrays().newObjectArray(owningBucketOrds.size())
) {
- for (long ordIdx = 0; ordIdx < topBucketsPerOrd.size(); ordIdx++) {
- int size = (int) Math.min(bucketOrds.size(), bucketCountThresholds.getShardSize());
-
- // as users can't control sort order, in practice we'll always sort by doc count descending
- try (
- BucketPriorityQueue<StringTerms.Bucket> ordered = new BucketPriorityQueue<>(
- size,
- bigArrays(),
- partiallyBuiltBucketComparator
- )
- ) {
- StringTerms.Bucket spare = null;
- BytesKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrds.get(ordIdx));
- Supplier<StringTerms.Bucket> emptyBucketBuilder = () -> new StringTerms.Bucket(
- new BytesRef(),
- 0,
- null,
- false,
- 0,
- format
- );
- while (ordsEnum.next()) {
- long docCount = bucketDocCount(ordsEnum.ord());
- otherDocCounts.increment(ordIdx, docCount);
- if (spare == null) {
- checkRealMemoryCBForInternalBucket();
- spare = emptyBucketBuilder.get();
+ try (IntArray bucketsToCollect = bigArrays().newIntArray(owningBucketOrds.size())) {
+ // find how many buckets we are going to collect
+ long ordsToCollect = 0;
+ for (long ordIdx = 0; ordIdx < owningBucketOrds.size(); ordIdx++) {
+ int size = (int) Math.min(bucketOrds.bucketsInOrd(owningBucketOrds.get(ordIdx)), bucketCountThresholds.getShardSize());
+ bucketsToCollect.set(ordIdx, size);
+ ordsToCollect += size;
+ }
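+ // ordsArray remembers, for every selected top bucket, the ordinal it was collected under so that
+ // sub-aggregations for all owning buckets can be built in a single pass at the end.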
+ try (LongArray ordsArray = bigArrays().newLongArray(ordsToCollect)) {
+ long ordsCollected = 0;
+ for (long ordIdx = 0; ordIdx < owningBucketOrds.size(); ordIdx++) {
+ // as users can't control sort order, in practice we'll always sort by doc count descending
+ try (
+ BucketPriorityQueue<StringTerms.Bucket> ordered = new BucketPriorityQueue<>(
+ bucketsToCollect.get(ordIdx),
+ bigArrays(),
+ order.partiallyBuiltBucketComparator(this)
+ )
+ ) {
+ BucketAndOrd<StringTerms.Bucket> spare = null;
+ BytesKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrds.get(ordIdx));
+ while (ordsEnum.next()) {
+ long docCount = bucketDocCount(ordsEnum.ord());
+ otherDocCounts.increment(ordIdx, docCount);
+ if (spare == null) {
+ checkRealMemoryCBForInternalBucket();
+ spare = new BucketAndOrd<>(new StringTerms.Bucket(new BytesRef(), 0, null, false, 0, format));
+ }
+ ordsEnum.readValue(spare.bucket.getTermBytes());
+ spare.bucket.setDocCount(docCount);
+ spare.ord = ordsEnum.ord();
+ spare = ordered.insertWithOverflow(spare);
+ }
+ final int orderedSize = (int) ordered.size();
+ final StringTerms.Bucket[] buckets = new StringTerms.Bucket[orderedSize];
+ for (int i = orderedSize - 1; i >= 0; --i) {
+ BucketAndOrd<StringTerms.Bucket> bucketAndOrd = ordered.pop();
+ buckets[i] = bucketAndOrd.bucket;
+ ordsArray.set(ordsCollected + i, bucketAndOrd.ord);
+ otherDocCounts.increment(ordIdx, -bucketAndOrd.bucket.getDocCount());
+ bucketAndOrd.bucket.setTermBytes(BytesRef.deepCopyOf(bucketAndOrd.bucket.getTermBytes()));
+ }
+ topBucketsPerOrd.set(ordIdx, buckets);
+ ordsCollected += orderedSize;
}
- ordsEnum.readValue(spare.getTermBytes());
- spare.setDocCount(docCount);
- spare.setBucketOrd(ordsEnum.ord());
- spare = ordered.insertWithOverflow(spare);
- }
-
- topBucketsPerOrd.set(ordIdx, new StringTerms.Bucket[(int) ordered.size()]);
- for (int i = (int) ordered.size() - 1; i >= 0; --i) {
- topBucketsPerOrd.get(ordIdx)[i] = ordered.pop();
- otherDocCounts.increment(ordIdx, -topBucketsPerOrd.get(ordIdx)[i].getDocCount());
- topBucketsPerOrd.get(ordIdx)[i].setTermBytes(BytesRef.deepCopyOf(topBucketsPerOrd.get(ordIdx)[i].getTermBytes()));
}
+ assert ordsCollected == ordsArray.size();
+ buildSubAggsForAllBuckets(topBucketsPerOrd, ordsArray, InternalTerms.Bucket::setAggregations);
}
}
- buildSubAggsForAllBuckets(topBucketsPerOrd, InternalTerms.Bucket::getBucketOrd, InternalTerms.Bucket::setAggregations);
-
return buildAggregations(Math.toIntExact(owningBucketOrds.size()), ordIdx -> {
final BucketOrder reduceOrder;
if (isKeyOrder(order) == false) {
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/BucketPriorityQueue.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/BucketPriorityQueue.java
index 7f8e5c8c885f..9550003a5bd1 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/BucketPriorityQueue.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/BucketPriorityQueue.java
@@ -13,17 +13,17 @@
import java.util.Comparator;
-public class BucketPriorityQueue<B> extends ObjectArrayPriorityQueue<B> {
+public class BucketPriorityQueue<B> extends ObjectArrayPriorityQueue<BucketAndOrd<B>> {
- private final Comparator<? super B> comparator;
+ private final Comparator<? super BucketAndOrd<B>> comparator;
- public BucketPriorityQueue(int size, BigArrays bigArrays, Comparator<? super B> comparator) {
+ public BucketPriorityQueue(int size, BigArrays bigArrays, Comparator<? super BucketAndOrd<B>> comparator) {
super(size, bigArrays);
this.comparator = comparator;
}
@Override
- protected boolean lessThan(B a, B b) {
+ protected boolean lessThan(BucketAndOrd<B> a, BucketAndOrd<B> b) {
return comparator.compare(a, b) > 0; // reverse, since we reverse again when adding to a list
}
}
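The hunks in this patch rely on a new `BucketAndOrd` holder that pairs a partially built bucket with the ordinal it was collected under (the new file itself is not shown in this excerpt). A minimal sketch consistent with how it is used here, assuming nothing beyond that usage:

    // Sketch only; the class added by the patch may differ in details.
    public final class BucketAndOrd<B> {
        public final B bucket; // the partially built bucket
        public long ord;       // collection-time bucket ordinal, later used to attach sub-aggregations

        public BucketAndOrd(B bucket) {
            this.bucket = bucket;
        }
    }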
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/BucketSignificancePriorityQueue.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/BucketSignificancePriorityQueue.java
index fe751c9e7918..4736f52d9362 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/BucketSignificancePriorityQueue.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/BucketSignificancePriorityQueue.java
@@ -12,14 +12,14 @@
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.ObjectArrayPriorityQueue;
-public class BucketSignificancePriorityQueue<B extends SignificantTerms.Bucket> extends ObjectArrayPriorityQueue<B> {
+public class BucketSignificancePriorityQueue<B extends SignificantTerms.Bucket> extends ObjectArrayPriorityQueue<BucketAndOrd<B>> {
public BucketSignificancePriorityQueue(int size, BigArrays bigArrays) {
super(size, bigArrays);
}
@Override
- protected boolean lessThan(SignificantTerms.Bucket o1, SignificantTerms.Bucket o2) {
- return o1.getSignificanceScore() < o2.getSignificanceScore();
+ protected boolean lessThan(BucketAndOrd<B> o1, BucketAndOrd<B> o2) {
+ return o1.bucket.getSignificanceScore() < o2.bucket.getSignificanceScore();
}
}
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java
index 0ec03a6f56dd..439b61cc43dd 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java
@@ -20,6 +20,7 @@
import org.apache.lucene.util.PriorityQueue;
import org.elasticsearch.common.CheckedSupplier;
import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.util.IntArray;
import org.elasticsearch.common.util.LongArray;
import org.elasticsearch.common.util.LongHash;
import org.elasticsearch.common.util.ObjectArray;
@@ -561,10 +562,10 @@ InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throws IOExc
) {
GlobalOrdLookupFunction lookupGlobalOrd = valuesSupplier.get()::lookupOrd;
final int size = (int) Math.min(valueCount, bucketCountThresholds.getShardSize());
- try (ObjectArrayPriorityQueue<TB> ordered = collectionStrategy.buildPriorityQueue(size)) {
+ try (ObjectArrayPriorityQueue<BucketAndOrd<TB>> ordered = collectionStrategy.buildPriorityQueue(size)) {
BucketUpdater<TB> updater = collectionStrategy.bucketUpdater(0, lookupGlobalOrd);
collect(new BucketInfoConsumer() {
- TB spare = null;
+ BucketAndOrd<TB> spare = null;
@Override
public void accept(long globalOrd, long bucketOrd, long docCount) throws IOException {
@@ -572,24 +573,31 @@ public void accept(long globalOrd, long bucketOrd, long docCount) throws IOExcep
if (docCount >= bucketCountThresholds.getShardMinDocCount()) {
if (spare == null) {
checkRealMemoryCBForInternalBucket();
- spare = collectionStrategy.buildEmptyTemporaryBucket();
+ spare = new BucketAndOrd<>(collectionStrategy.buildEmptyTemporaryBucket());
}
- updater.updateBucket(spare, globalOrd, bucketOrd, docCount);
+ spare.ord = bucketOrd;
+ updater.updateBucket(spare.bucket, globalOrd, docCount);
spare = ordered.insertWithOverflow(spare);
}
}
});
// Get the top buckets
- topBucketsPreOrd.set(0, collectionStrategy.buildBuckets((int) ordered.size()));
- for (int i = (int) ordered.size() - 1; i >= 0; --i) {
- checkRealMemoryCBForInternalBucket();
- B bucket = collectionStrategy.convertTempBucketToRealBucket(ordered.pop(), lookupGlobalOrd);
- topBucketsPreOrd.get(0)[i] = bucket;
- otherDocCount.increment(0, -bucket.getDocCount());
+ int orderedSize = (int) ordered.size();
+ try (LongArray ordsArray = bigArrays().newLongArray(orderedSize)) {
+ B[] buckets = collectionStrategy.buildBuckets(orderedSize);
+ for (int i = orderedSize - 1; i >= 0; --i) {
+ checkRealMemoryCBForInternalBucket();
+ BucketAndOrd<TB> bucketAndOrd = ordered.pop();
+ B bucket = collectionStrategy.convertTempBucketToRealBucket(bucketAndOrd.bucket, lookupGlobalOrd);
+ ordsArray.set(i, bucketAndOrd.ord);
+ buckets[i] = bucket;
+ otherDocCount.increment(0, -bucket.getDocCount());
+ }
+ topBucketsPreOrd.set(0, buckets);
+ collectionStrategy.buildSubAggs(topBucketsPreOrd, ordsArray);
}
}
- collectionStrategy.buildSubAggs(topBucketsPreOrd);
return GlobalOrdinalsStringTermsAggregator.this.buildAggregations(
Math.toIntExact(owningBucketOrds.size()),
ordIdx -> collectionStrategy.buildResult(
@@ -710,39 +718,61 @@ InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throws IOExc
LongArray otherDocCount = bigArrays().newLongArray(owningBucketOrds.size(), true);
ObjectArray<B[]> topBucketsPreOrd = collectionStrategy.buildTopBucketsPerOrd(owningBucketOrds.size())
) {
- GlobalOrdLookupFunction lookupGlobalOrd = valuesSupplier.get()::lookupOrd;
- for (long ordIdx = 0; ordIdx < topBucketsPreOrd.size(); ordIdx++) {
- long owningBucketOrd = owningBucketOrds.get(ordIdx);
- collectZeroDocEntriesIfNeeded(owningBucketOrds.get(ordIdx));
- int size = (int) Math.min(bucketOrds.bucketsInOrd(owningBucketOrd), bucketCountThresholds.getShardSize());
- try (ObjectArrayPriorityQueue<TB> ordered = collectionStrategy.buildPriorityQueue(size)) {
- BucketUpdater<TB> updater = collectionStrategy.bucketUpdater(owningBucketOrd, lookupGlobalOrd);
- LongKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrd);
- TB spare = null;
- while (ordsEnum.next()) {
- long docCount = bucketDocCount(ordsEnum.ord());
- otherDocCount.increment(ordIdx, docCount);
- if (docCount < bucketCountThresholds.getShardMinDocCount()) {
- continue;
- }
- if (spare == null) {
- checkRealMemoryCBForInternalBucket();
- spare = collectionStrategy.buildEmptyTemporaryBucket();
+ try (IntArray bucketsToCollect = bigArrays().newIntArray(owningBucketOrds.size())) {
+ long ordsToCollect = 0;
+ for (long ordIdx = 0; ordIdx < owningBucketOrds.size(); ordIdx++) {
+ final long owningBucketOrd = owningBucketOrds.get(ordIdx);
+ collectZeroDocEntriesIfNeeded(owningBucketOrd);
+ final int size = (int) Math.min(bucketOrds.bucketsInOrd(owningBucketOrd), bucketCountThresholds.getShardSize());
+ ordsToCollect += size;
+ bucketsToCollect.set(ordIdx, size);
+ }
+ try (LongArray ordsArray = bigArrays().newLongArray(ordsToCollect)) {
+ long ordsCollected = 0;
+ GlobalOrdLookupFunction lookupGlobalOrd = valuesSupplier.get()::lookupOrd;
+ for (long ordIdx = 0; ordIdx < topBucketsPreOrd.size(); ordIdx++) {
+ long owningBucketOrd = owningBucketOrds.get(ordIdx);
+ try (
+ ObjectArrayPriorityQueue<BucketAndOrd<TB>> ordered = collectionStrategy.buildPriorityQueue(
+ bucketsToCollect.get(ordIdx)
+ )
+ ) {
+ BucketUpdater<TB> updater = collectionStrategy.bucketUpdater(owningBucketOrd, lookupGlobalOrd);
+ LongKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrd);
+ BucketAndOrd<TB> spare = null;
+ while (ordsEnum.next()) {
+ long docCount = bucketDocCount(ordsEnum.ord());
+ otherDocCount.increment(ordIdx, docCount);
+ if (docCount < bucketCountThresholds.getShardMinDocCount()) {
+ continue;
+ }
+ if (spare == null) {
+ checkRealMemoryCBForInternalBucket();
+ spare = new BucketAndOrd<>(collectionStrategy.buildEmptyTemporaryBucket());
+ }
+ updater.updateBucket(spare.bucket, ordsEnum.value(), docCount);
+ spare.ord = ordsEnum.ord();
+ spare = ordered.insertWithOverflow(spare);
+ }
+ // Get the top buckets
+ int orderedSize = (int) ordered.size();
+ B[] buckets = collectionStrategy.buildBuckets(orderedSize);
+ for (int i = orderedSize - 1; i >= 0; --i) {
+ checkRealMemoryCBForInternalBucket();
+ BucketAndOrd<TB> bucketAndOrd = ordered.pop();
+ B bucket = collectionStrategy.convertTempBucketToRealBucket(bucketAndOrd.bucket, lookupGlobalOrd);
+ ordsArray.set(ordsCollected + i, bucketAndOrd.ord);
+ buckets[i] = bucket;
+ otherDocCount.increment(ordIdx, -bucket.getDocCount());
+ }
+ topBucketsPreOrd.set(ordIdx, buckets);
+ ordsCollected += orderedSize;
}
- updater.updateBucket(spare, ordsEnum.value(), ordsEnum.ord(), docCount);
- spare = ordered.insertWithOverflow(spare);
- }
- // Get the top buckets
- topBucketsPreOrd.set(ordIdx, collectionStrategy.buildBuckets((int) ordered.size()));
- for (int i = (int) ordered.size() - 1; i >= 0; --i) {
- checkRealMemoryCBForInternalBucket();
- B bucket = collectionStrategy.convertTempBucketToRealBucket(ordered.pop(), lookupGlobalOrd);
- topBucketsPreOrd.get(ordIdx)[i] = bucket;
- otherDocCount.increment(ordIdx, -bucket.getDocCount());
}
+ assert ordsCollected == ordsArray.size();
+ collectionStrategy.buildSubAggs(topBucketsPreOrd, ordsArray);
}
}
- collectionStrategy.buildSubAggs(topBucketsPreOrd);
return GlobalOrdinalsStringTermsAggregator.this.buildAggregations(
Math.toIntExact(owningBucketOrds.size()),
ordIdx -> collectionStrategy.buildResult(
@@ -791,7 +821,7 @@ abstract class ResultStrategy<
* Build a {@link PriorityQueue} to sort the buckets. After we've
* collected all of the buckets we'll collect all entries in the queue.
*/
- abstract ObjectArrayPriorityQueue<TB> buildPriorityQueue(int size);
+ abstract ObjectArrayPriorityQueue<BucketAndOrd<TB>> buildPriorityQueue(int size);
/**
* Build an array to hold the "top" buckets for each ordinal.
@@ -813,7 +843,7 @@ abstract class ResultStrategy<
* Build the sub-aggregations into the buckets. This will usually
* delegate to {@link #buildSubAggsForAllBuckets}.
*/
- abstract void buildSubAggs(ObjectArray<B[]> topBucketsPreOrd) throws IOException;
+ abstract void buildSubAggs(ObjectArray<B[]> topBucketsPreOrd, LongArray ordsArray) throws IOException;
/**
* Turn the buckets into an aggregation result.
@@ -834,7 +864,7 @@ abstract class ResultStrategy<
}
interface BucketUpdater {
- void updateBucket(TB spare, long globalOrd, long bucketOrd, long docCount) throws IOException;
+ void updateBucket(TB spare, long globalOrd, long docCount) throws IOException;
}
/**
@@ -868,29 +898,30 @@ OrdBucket buildEmptyTemporaryBucket() {
@Override
BucketUpdater<OrdBucket> bucketUpdater(long owningBucketOrd, GlobalOrdLookupFunction lookupGlobalOrd) {
- return (spare, globalOrd, bucketOrd, docCount) -> {
+ return (spare, globalOrd, docCount) -> {
spare.globalOrd = globalOrd;
- spare.bucketOrd = bucketOrd;
spare.docCount = docCount;
};
}
@Override
- ObjectArrayPriorityQueue<OrdBucket> buildPriorityQueue(int size) {
- return new BucketPriorityQueue<>(size, bigArrays(), partiallyBuiltBucketComparator);
+ ObjectArrayPriorityQueue<BucketAndOrd<OrdBucket>> buildPriorityQueue(int size) {
+ return new BucketPriorityQueue<>(
+ size,
+ bigArrays(),
+ order.partiallyBuiltBucketComparator(GlobalOrdinalsStringTermsAggregator.this)
+ );
}
@Override
StringTerms.Bucket convertTempBucketToRealBucket(OrdBucket temp, GlobalOrdLookupFunction lookupGlobalOrd) throws IOException {
BytesRef term = BytesRef.deepCopyOf(lookupGlobalOrd.apply(temp.globalOrd));
- StringTerms.Bucket result = new StringTerms.Bucket(term, temp.docCount, null, showTermDocCountError, 0, format);
- result.bucketOrd = temp.bucketOrd;
- return result;
+ return new StringTerms.Bucket(term, temp.docCount, null, showTermDocCountError, 0, format);
}
@Override
- void buildSubAggs(ObjectArray<StringTerms.Bucket[]> topBucketsPreOrd) throws IOException {
- buildSubAggsForAllBuckets(topBucketsPreOrd, b -> b.bucketOrd, (b, aggs) -> b.aggregations = aggs);
+ void buildSubAggs(ObjectArray<StringTerms.Bucket[]> topBucketsPreOrd, LongArray ordsArray) throws IOException {
+ buildSubAggsForAllBuckets(topBucketsPreOrd, ordsArray, (b, aggs) -> b.aggregations = aggs);
}
@Override
@@ -1005,8 +1036,7 @@ private long subsetSize(long owningBucketOrd) {
@Override
BucketUpdater<SignificantStringTerms.Bucket> bucketUpdater(long owningBucketOrd, GlobalOrdLookupFunction lookupGlobalOrd) {
long subsetSize = subsetSize(owningBucketOrd);
- return (spare, globalOrd, bucketOrd, docCount) -> {
- spare.bucketOrd = bucketOrd;
+ return (spare, globalOrd, docCount) -> {
oversizedCopy(lookupGlobalOrd.apply(globalOrd), spare.termBytes);
spare.subsetDf = docCount;
spare.supersetDf = backgroundFrequencies.freq(spare.termBytes);
@@ -1020,7 +1050,7 @@ BucketUpdater bucketUpdater(long owningBucketOrd,
}
@Override
- ObjectArrayPriorityQueue<SignificantStringTerms.Bucket> buildPriorityQueue(int size) {
+ ObjectArrayPriorityQueue<BucketAndOrd<SignificantStringTerms.Bucket>> buildPriorityQueue(int size) {
return new BucketSignificancePriorityQueue<>(size, bigArrays());
}
@@ -1033,8 +1063,8 @@ SignificantStringTerms.Bucket convertTempBucketToRealBucket(
}
@Override
- void buildSubAggs(ObjectArray<SignificantStringTerms.Bucket[]> topBucketsPreOrd) throws IOException {
- buildSubAggsForAllBuckets(topBucketsPreOrd, b -> b.bucketOrd, (b, aggs) -> b.aggregations = aggs);
+ void buildSubAggs(ObjectArray<SignificantStringTerms.Bucket[]> topBucketsPreOrd, LongArray ordsArray) throws IOException {
+ buildSubAggsForAllBuckets(topBucketsPreOrd, ordsArray, (b, aggs) -> b.aggregations = aggs);
}
@Override
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalSignificantTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalSignificantTerms.java
index 78ae2481f5d9..5108793b8a80 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalSignificantTerms.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalSignificantTerms.java
@@ -10,12 +10,12 @@
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.util.ObjectArrayPriorityQueue;
import org.elasticsearch.common.util.ObjectObjectPagedHashMap;
import org.elasticsearch.core.Releasables;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.aggregations.AggregationErrors;
import org.elasticsearch.search.aggregations.AggregationReduceContext;
-import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.AggregatorReducer;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.InternalAggregations;
@@ -58,12 +58,6 @@ public interface Reader> {
long subsetDf;
long supersetDf;
- /**
- * Ordinal of the bucket while it is being built. Not used after it is
- * returned from {@link Aggregator#buildAggregations(org.elasticsearch.common.util.LongArray)} and not
- * serialized.
- */
- transient long bucketOrd;
double score;
protected InternalAggregations aggregations;
final transient DocValueFormat format;
@@ -235,7 +229,12 @@ canLeadReduction here is essentially checking if this shard returned data. Unma
public InternalAggregation get() {
final SignificanceHeuristic heuristic = getSignificanceHeuristic().rewrite(reduceContext);
final int size = (int) (reduceContext.isFinalReduce() == false ? buckets.size() : Math.min(requiredSize, buckets.size()));
- try (BucketSignificancePriorityQueue<B> ordered = new BucketSignificancePriorityQueue<>(size, reduceContext.bigArrays())) {
+ try (ObjectArrayPriorityQueue<B> ordered = new ObjectArrayPriorityQueue<B>(size, reduceContext.bigArrays()) {
+ @Override
+ protected boolean lessThan(B a, B b) {
+ return a.getSignificanceScore() < b.getSignificanceScore();
+ }
+ }) {
buckets.forEach(entry -> {
final B b = createBucket(
entry.value.subsetDf[0],
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java
index 739f0b923eaa..de35046691b3 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java
@@ -38,8 +38,6 @@ public interface Reader> {
B read(StreamInput in, DocValueFormat format, boolean showDocCountError) throws IOException;
}
- long bucketOrd;
-
protected long docCount;
private long docCountError;
protected InternalAggregations aggregations;
@@ -88,14 +86,6 @@ public void setDocCount(long docCount) {
this.docCount = docCount;
}
- public long getBucketOrd() {
- return bucketOrd;
- }
-
- public void setBucketOrd(long bucketOrd) {
- this.bucketOrd = bucketOrd;
- }
-
@Override
public long getDocCountError() {
return docCountError;
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/MapStringTermsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/MapStringTermsAggregator.java
index b96c495d3748..026912a583ef 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/MapStringTermsAggregator.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/MapStringTermsAggregator.java
@@ -17,6 +17,7 @@
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.apache.lucene.util.PriorityQueue;
+import org.elasticsearch.common.util.IntArray;
import org.elasticsearch.common.util.LongArray;
import org.elasticsearch.common.util.ObjectArray;
import org.elasticsearch.common.util.ObjectArrayPriorityQueue;
@@ -43,6 +44,7 @@
import java.io.IOException;
import java.util.Arrays;
+import java.util.Comparator;
import java.util.Map;
import java.util.function.BiConsumer;
import java.util.function.Function;
@@ -287,40 +289,55 @@ private InternalAggregation[] buildAggregations(LongArray owningBucketOrds) thro
LongArray otherDocCounts = bigArrays().newLongArray(owningBucketOrds.size(), true);
ObjectArray<B[]> topBucketsPerOrd = buildTopBucketsPerOrd(Math.toIntExact(owningBucketOrds.size()))
) {
- for (long ordIdx = 0; ordIdx < topBucketsPerOrd.size(); ordIdx++) {
- long owningOrd = owningBucketOrds.get(ordIdx);
- collectZeroDocEntriesIfNeeded(owningOrd, excludeDeletedDocs);
- int size = (int) Math.min(bucketOrds.size(), bucketCountThresholds.getShardSize());
-
- try (ObjectArrayPriorityQueue<B> ordered = buildPriorityQueue(size)) {
- B spare = null;
- BytesKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningOrd);
- BucketUpdater<B> bucketUpdater = bucketUpdater(owningOrd);
- while (ordsEnum.next()) {
- long docCount = bucketDocCount(ordsEnum.ord());
- otherDocCounts.increment(ordIdx, docCount);
- if (docCount < bucketCountThresholds.getShardMinDocCount()) {
- continue;
- }
- if (spare == null) {
- checkRealMemoryCBForInternalBucket();
- spare = buildEmptyBucket();
+ try (IntArray bucketsToCollect = bigArrays().newIntArray(owningBucketOrds.size())) {
+ long ordsToCollect = 0;
+ for (long ordIdx = 0; ordIdx < owningBucketOrds.size(); ordIdx++) {
+ final long owningBucketOrd = owningBucketOrds.get(ordIdx);
+ collectZeroDocEntriesIfNeeded(owningBucketOrd, excludeDeletedDocs);
+ final int size = (int) Math.min(bucketOrds.bucketsInOrd(owningBucketOrd), bucketCountThresholds.getShardSize());
+ ordsToCollect += size;
+ bucketsToCollect.set(ordIdx, size);
+ }
+ try (LongArray ordsArray = bigArrays().newLongArray(ordsToCollect)) {
+ long ordsCollected = 0;
+ for (long ordIdx = 0; ordIdx < topBucketsPerOrd.size(); ordIdx++) {
+ long owningOrd = owningBucketOrds.get(ordIdx);
+ try (ObjectArrayPriorityQueue<BucketAndOrd<B>> ordered = buildPriorityQueue(bucketsToCollect.get(ordIdx))) {
+ BucketAndOrd<B> spare = null;
+ BytesKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningOrd);
+ BucketUpdater<B> bucketUpdater = bucketUpdater(owningOrd);
+ while (ordsEnum.next()) {
+ long docCount = bucketDocCount(ordsEnum.ord());
+ otherDocCounts.increment(ordIdx, docCount);
+ if (docCount < bucketCountThresholds.getShardMinDocCount()) {
+ continue;
+ }
+ if (spare == null) {
+ checkRealMemoryCBForInternalBucket();
+ spare = new BucketAndOrd<>(buildEmptyBucket());
+ }
+ bucketUpdater.updateBucket(spare.bucket, ordsEnum, docCount);
+ spare.ord = ordsEnum.ord();
+ spare = ordered.insertWithOverflow(spare);
+ }
+
+ final int orderedSize = (int) ordered.size();
+ final B[] buckets = buildBuckets(orderedSize);
+ for (int i = orderedSize - 1; i >= 0; --i) {
+ BucketAndOrd<B> bucketAndOrd = ordered.pop();
+ finalizeBucket(bucketAndOrd.bucket);
+ buckets[i] = bucketAndOrd.bucket;
+ ordsArray.set(ordsCollected + i, bucketAndOrd.ord);
+ otherDocCounts.increment(ordIdx, -bucketAndOrd.bucket.getDocCount());
+ }
+ topBucketsPerOrd.set(ordIdx, buckets);
+ ordsCollected += orderedSize;
}
- bucketUpdater.updateBucket(spare, ordsEnum, docCount);
- spare = ordered.insertWithOverflow(spare);
- }
-
- topBucketsPerOrd.set(ordIdx, buildBuckets((int) ordered.size()));
- for (int i = (int) ordered.size() - 1; i >= 0; --i) {
- topBucketsPerOrd.get(ordIdx)[i] = ordered.pop();
- otherDocCounts.increment(ordIdx, -topBucketsPerOrd.get(ordIdx)[i].getDocCount());
- finalizeBucket(topBucketsPerOrd.get(ordIdx)[i]);
}
+ assert ordsCollected == ordsArray.size();
+ buildSubAggs(topBucketsPerOrd, ordsArray);
}
}
-
- buildSubAggs(topBucketsPerOrd);
-
return MapStringTermsAggregator.this.buildAggregations(
Math.toIntExact(owningBucketOrds.size()),
ordIdx -> buildResult(owningBucketOrds.get(ordIdx), otherDocCounts.get(ordIdx), topBucketsPerOrd.get(ordIdx))
@@ -355,7 +372,7 @@ private InternalAggregation[] buildAggregations(LongArray owningBucketOrds) thro
* Build a {@link PriorityQueue} to sort the buckets. After we've
* collected all of the buckets we'll collect all entries in the queue.
*/
- abstract ObjectArrayPriorityQueue<B> buildPriorityQueue(int size);
+ abstract ObjectArrayPriorityQueue<BucketAndOrd<B>> buildPriorityQueue(int size);
/**
* Update fields in {@code spare} to reflect information collected for
@@ -382,9 +399,9 @@ private InternalAggregation[] buildAggregations(LongArray owningBucketOrds) thro
/**
* Build the sub-aggregations into the buckets. This will usually
- * delegate to {@link #buildSubAggsForAllBuckets}.
+ * delegate to {@link #buildSubAggsForAllBuckets(ObjectArray, LongArray, BiConsumer)}.
*/
- abstract void buildSubAggs(ObjectArray<B[]> topBucketsPerOrd) throws IOException;
+ abstract void buildSubAggs(ObjectArray<B[]> topBucketsPerOrd, LongArray ordsArray) throws IOException;
/**
* Turn the buckets into an aggregation result.
@@ -407,9 +424,11 @@ interface BucketUpdater
*/
class StandardTermsResults extends ResultStrategy<StringTerms, StringTerms.Bucket> {
private final ValuesSource valuesSource;
+ private final Comparator<BucketAndOrd<StringTerms.Bucket>> comparator;
- StandardTermsResults(ValuesSource valuesSource) {
+ StandardTermsResults(ValuesSource valuesSource, Aggregator aggregator) {
this.valuesSource = valuesSource;
+ this.comparator = order.partiallyBuiltBucketComparator(aggregator);
}
@Override
@@ -498,8 +517,8 @@ StringTerms.Bucket buildEmptyBucket() {
}
@Override
- ObjectArrayPriorityQueue<StringTerms.Bucket> buildPriorityQueue(int size) {
- return new BucketPriorityQueue<>(size, bigArrays(), partiallyBuiltBucketComparator);
+ ObjectArrayPriorityQueue<BucketAndOrd<StringTerms.Bucket>> buildPriorityQueue(int size) {
+ return new BucketPriorityQueue<>(size, bigArrays(), comparator);
}
@Override
@@ -507,7 +526,6 @@ BucketUpdater bucketUpdater(long owningBucketOrd) {
return (spare, ordsEnum, docCount) -> {
ordsEnum.readValue(spare.termBytes);
spare.docCount = docCount;
- spare.bucketOrd = ordsEnum.ord();
};
}
@@ -532,8 +550,8 @@ void finalizeBucket(StringTerms.Bucket bucket) {
}
@Override
- void buildSubAggs(ObjectArray<StringTerms.Bucket[]> topBucketsPerOrd) throws IOException {
- buildSubAggsForAllBuckets(topBucketsPerOrd, b -> b.bucketOrd, (b, a) -> b.aggregations = a);
+ void buildSubAggs(ObjectArray<StringTerms.Bucket[]> topBucketsPerOrd, LongArray ordArray) throws IOException {
+ buildSubAggsForAllBuckets(topBucketsPerOrd, ordArray, (b, a) -> b.aggregations = a);
}
@Override
@@ -625,7 +643,7 @@ SignificantStringTerms.Bucket buildEmptyBucket() {
}
@Override
- ObjectArrayPriorityQueue<SignificantStringTerms.Bucket> buildPriorityQueue(int size) {
+ ObjectArrayPriorityQueue<BucketAndOrd<SignificantStringTerms.Bucket>> buildPriorityQueue(int size) {
return new BucketSignificancePriorityQueue<>(size, bigArrays());
}
@@ -634,7 +652,6 @@ BucketUpdater bucketUpdater(long owningBucketOrd)
long subsetSize = subsetSizes.get(owningBucketOrd);
return (spare, ordsEnum, docCount) -> {
ordsEnum.readValue(spare.termBytes);
- spare.bucketOrd = ordsEnum.ord();
spare.subsetDf = docCount;
spare.supersetDf = backgroundFrequencies.freq(spare.termBytes);
/*
@@ -667,8 +684,8 @@ void finalizeBucket(SignificantStringTerms.Bucket bucket) {
}
@Override
- void buildSubAggs(ObjectArray<SignificantStringTerms.Bucket[]> topBucketsPerOrd) throws IOException {
- buildSubAggsForAllBuckets(topBucketsPerOrd, b -> b.bucketOrd, (b, a) -> b.aggregations = a);
+ void buildSubAggs(ObjectArray<SignificantStringTerms.Bucket[]> topBucketsPerOrd, LongArray ordsArray) throws IOException {
+ buildSubAggsForAllBuckets(topBucketsPerOrd, ordsArray, (b, a) -> b.aggregations = a);
}
@Override
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/NumericTermsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/NumericTermsAggregator.java
index 5d4c15d8a3b8..a54053f712f8 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/NumericTermsAggregator.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/NumericTermsAggregator.java
@@ -14,6 +14,7 @@
import org.apache.lucene.index.SortedNumericDocValues;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.util.NumericUtils;
+import org.elasticsearch.common.util.IntArray;
import org.elasticsearch.common.util.LongArray;
import org.elasticsearch.common.util.ObjectArray;
import org.elasticsearch.common.util.ObjectArrayPriorityQueue;
@@ -40,6 +41,7 @@
import java.io.IOException;
import java.util.Arrays;
+import java.util.Comparator;
import java.util.Map;
import java.util.function.BiConsumer;
import java.util.function.Function;
@@ -167,42 +169,56 @@ private InternalAggregation[] buildAggregations(LongArray owningBucketOrds) thro
LongArray otherDocCounts = bigArrays().newLongArray(owningBucketOrds.size(), true);
ObjectArray<B[]> topBucketsPerOrd = buildTopBucketsPerOrd(owningBucketOrds.size())
) {
- for (long ordIdx = 0; ordIdx < topBucketsPerOrd.size(); ordIdx++) {
- final long owningBucketOrd = owningBucketOrds.get(ordIdx);
- collectZeroDocEntriesIfNeeded(owningBucketOrd, excludeDeletedDocs);
- long bucketsInOrd = bucketOrds.bucketsInOrd(owningBucketOrd);
-
- int size = (int) Math.min(bucketsInOrd, bucketCountThresholds.getShardSize());
- try (ObjectArrayPriorityQueue<B> ordered = buildPriorityQueue(size)) {
- B spare = null;
- BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrd);
- BucketUpdater<B> bucketUpdater = bucketUpdater(owningBucketOrd);
- while (ordsEnum.next()) {
- long docCount = bucketDocCount(ordsEnum.ord());
- otherDocCounts.increment(ordIdx, docCount);
- if (docCount < bucketCountThresholds.getShardMinDocCount()) {
- continue;
- }
- if (spare == null) {
- checkRealMemoryCBForInternalBucket();
- spare = buildEmptyBucket();
- }
- bucketUpdater.updateBucket(spare, ordsEnum, docCount);
- spare = ordered.insertWithOverflow(spare);
- }
+ try (IntArray bucketsToCollect = bigArrays().newIntArray(owningBucketOrds.size())) {
+ long ordsToCollect = 0;
+ for (long ordIdx = 0; ordIdx < owningBucketOrds.size(); ordIdx++) {
+ final long owningBucketOrd = owningBucketOrds.get(ordIdx);
+ collectZeroDocEntriesIfNeeded(owningBucketOrd, excludeDeletedDocs);
+ int size = (int) Math.min(bucketOrds.bucketsInOrd(owningBucketOrd), bucketCountThresholds.getShardSize());
+ bucketsToCollect.set(ordIdx, size);
+ ordsToCollect += size;
+ }
+ try (LongArray ordsArray = bigArrays().newLongArray(ordsToCollect)) {
+ long ordsCollected = 0;
+ for (long ordIdx = 0; ordIdx < topBucketsPerOrd.size(); ordIdx++) {
+ final long owningBucketOrd = owningBucketOrds.get(ordIdx);
+ try (ObjectArrayPriorityQueue<BucketAndOrd<B>> ordered = buildPriorityQueue(bucketsToCollect.get(ordIdx))) {
+ BucketAndOrd<B> spare = null;
+ BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrd);
+ BucketUpdater<B> bucketUpdater = bucketUpdater(owningBucketOrd);
+ while (ordsEnum.next()) {
+ long docCount = bucketDocCount(ordsEnum.ord());
+ otherDocCounts.increment(ordIdx, docCount);
+ if (docCount < bucketCountThresholds.getShardMinDocCount()) {
+ continue;
+ }
+ if (spare == null) {
+ checkRealMemoryCBForInternalBucket();
+ spare = new BucketAndOrd<>(buildEmptyBucket());
+ }
+ bucketUpdater.updateBucket(spare.bucket, ordsEnum, docCount);
+ spare.ord = ordsEnum.ord();
+ spare = ordered.insertWithOverflow(spare);
+ }
+
+ // Get the top buckets
+ final int orderedSize = (int) ordered.size();
+ final B[] bucketsForOrd = buildBuckets(orderedSize);
+ for (int b = orderedSize - 1; b >= 0; --b) {
+ BucketAndOrd<B> bucketAndOrd = ordered.pop();
+ bucketsForOrd[b] = bucketAndOrd.bucket;
+ ordsArray.set(ordsCollected + b, bucketAndOrd.ord);
+ otherDocCounts.increment(ordIdx, -bucketAndOrd.bucket.getDocCount());
+ }
+ topBucketsPerOrd.set(ordIdx, bucketsForOrd);
+ ordsCollected += orderedSize;
- // Get the top buckets
- B[] bucketsForOrd = buildBuckets((int) ordered.size());
- topBucketsPerOrd.set(ordIdx, bucketsForOrd);
- for (int b = (int) ordered.size() - 1; b >= 0; --b) {
- topBucketsPerOrd.get(ordIdx)[b] = ordered.pop();
- otherDocCounts.increment(ordIdx, -topBucketsPerOrd.get(ordIdx)[b].getDocCount());
+ }
}
+ assert ordsCollected == ordsArray.size();
+ buildSubAggs(topBucketsPerOrd, ordsArray);
}
}
-
- buildSubAggs(topBucketsPerOrd);
-
return NumericTermsAggregator.this.buildAggregations(
Math.toIntExact(owningBucketOrds.size()),
ordIdx -> buildResult(owningBucketOrds.get(ordIdx), otherDocCounts.get(ordIdx), topBucketsPerOrd.get(ordIdx))
@@ -254,13 +270,13 @@ private InternalAggregation[] buildAggregations(LongArray owningBucketOrds) thro
* Build a {@link ObjectArrayPriorityQueue} to sort the buckets. After we've
* collected all of the buckets we'll collect all entries in the queue.
*/
- abstract ObjectArrayPriorityQueue<B> buildPriorityQueue(int size);
+ abstract ObjectArrayPriorityQueue<BucketAndOrd<B>> buildPriorityQueue(int size);
/**
* Build the sub-aggregations into the buckets. This will usually
- * delegate to {@link #buildSubAggsForAllBuckets}.
+ * delegate to {@link #buildSubAggsForAllBuckets(ObjectArray, LongArray, BiConsumer)}.
*/
- abstract void buildSubAggs(ObjectArray<B[]> topBucketsPerOrd) throws IOException;
+ abstract void buildSubAggs(ObjectArray<B[]> topBucketsPerOrd, LongArray ordsArray) throws IOException;
/**
* Collect extra entries for "zero" hit documents if they were requested
@@ -287,9 +303,11 @@ interface BucketUpdater
abstract class StandardTermsResultStrategy<R extends InternalMappedTerms<R, B>, B extends InternalTerms.Bucket<B>> extends
ResultStrategy<R, B> {
protected final boolean showTermDocCountError;
+ private final Comparator<BucketAndOrd<B>> comparator;
- StandardTermsResultStrategy(boolean showTermDocCountError) {
+ StandardTermsResultStrategy(boolean showTermDocCountError, Aggregator aggregator) {
this.showTermDocCountError = showTermDocCountError;
+ this.comparator = order.partiallyBuiltBucketComparator(aggregator);
}
@Override
@@ -298,13 +316,13 @@ final LeafBucketCollector wrapCollector(LeafBucketCollector primary) {
}
@Override
- final ObjectArrayPriorityQueue<B> buildPriorityQueue(int size) {
- return new BucketPriorityQueue<>(size, bigArrays(), partiallyBuiltBucketComparator);
+ final ObjectArrayPriorityQueue<BucketAndOrd<B>> buildPriorityQueue(int size) {
+ return new BucketPriorityQueue<>(size, bigArrays(), comparator);
}
@Override
- final void buildSubAggs(ObjectArray<B[]> topBucketsPerOrd) throws IOException {
- buildSubAggsForAllBuckets(topBucketsPerOrd, b -> b.bucketOrd, (b, aggs) -> b.aggregations = aggs);
+ final void buildSubAggs(ObjectArray<B[]> topBucketsPerOrd, LongArray ordsArray) throws IOException {
+ buildSubAggsForAllBuckets(topBucketsPerOrd, ordsArray, (b, aggs) -> b.aggregations = aggs);
}
@Override
@@ -340,8 +358,8 @@ public final void close() {}
}
class LongTermsResults extends StandardTermsResultStrategy<LongTerms, LongTerms.Bucket> {
- LongTermsResults(boolean showTermDocCountError) {
- super(showTermDocCountError);
+ LongTermsResults(boolean showTermDocCountError, Aggregator aggregator) {
+ super(showTermDocCountError, aggregator);
}
@Override
@@ -374,7 +392,6 @@ BucketUpdater bucketUpdater(long owningBucketOrd) {
return (LongTerms.Bucket spare, BucketOrdsEnum ordsEnum, long docCount) -> {
spare.term = ordsEnum.value();
spare.docCount = docCount;
- spare.bucketOrd = ordsEnum.ord();
};
}
@@ -424,8 +441,8 @@ LongTerms buildEmptyResult() {
class DoubleTermsResults extends StandardTermsResultStrategy<DoubleTerms, DoubleTerms.Bucket> {
- DoubleTermsResults(boolean showTermDocCountError) {
- super(showTermDocCountError);
+ DoubleTermsResults(boolean showTermDocCountError, Aggregator aggregator) {
+ super(showTermDocCountError, aggregator);
}
@Override
@@ -458,7 +475,6 @@ BucketUpdater bucketUpdater(long owningBucketOrd) {
return (DoubleTerms.Bucket spare, BucketOrdsEnum ordsEnum, long docCount) -> {
spare.term = NumericUtils.sortableLongToDouble(ordsEnum.value());
spare.docCount = docCount;
- spare.bucketOrd = ordsEnum.ord();
};
}
@@ -575,7 +591,6 @@ BucketUpdater bucketUpdater(long owningBucketOrd) {
spare.term = ordsEnum.value();
spare.subsetDf = docCount;
spare.supersetDf = backgroundFrequencies.freq(spare.term);
- spare.bucketOrd = ordsEnum.ord();
// During shard-local down-selection we use subset/superset stats that are for this shard only
// Back at the central reducer these properties will be updated with global stats
spare.updateScore(significanceHeuristic, subsetSize, supersetSize);
@@ -583,13 +598,13 @@ BucketUpdater bucketUpdater(long owningBucketOrd) {
}
@Override
- ObjectArrayPriorityQueue<SignificantLongTerms.Bucket> buildPriorityQueue(int size) {
+ ObjectArrayPriorityQueue<BucketAndOrd<SignificantLongTerms.Bucket>> buildPriorityQueue(int size) {
return new BucketSignificancePriorityQueue<>(size, bigArrays());
}
@Override
- void buildSubAggs(ObjectArray<SignificantLongTerms.Bucket[]> topBucketsPerOrd) throws IOException {
- buildSubAggsForAllBuckets(topBucketsPerOrd, b -> b.bucketOrd, (b, aggs) -> b.aggregations = aggs);
+ void buildSubAggs(ObjectArray<SignificantLongTerms.Bucket[]> topBucketsPerOrd, LongArray ordsArray) throws IOException {
+ buildSubAggsForAllBuckets(topBucketsPerOrd, ordsArray, (b, aggs) -> b.aggregations = aggs);
}
@Override
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregator.java
index 4922be7cec1b..c07c0726a4ae 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregator.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregator.java
@@ -27,7 +27,6 @@
import org.elasticsearch.xcontent.XContentBuilder;
import java.io.IOException;
-import java.util.Comparator;
import java.util.HashSet;
import java.util.Map;
import java.util.Objects;
@@ -190,7 +189,6 @@ public boolean equals(Object obj) {
protected final DocValueFormat format;
protected final BucketCountThresholds bucketCountThresholds;
protected final BucketOrder order;
- protected final Comparator> partiallyBuiltBucketComparator;
protected final Set aggsUsedForSorting;
protected final SubAggCollectionMode collectMode;
@@ -209,7 +207,9 @@ public TermsAggregator(
super(name, factories, context, parent, metadata);
this.bucketCountThresholds = bucketCountThresholds;
this.order = order;
- partiallyBuiltBucketComparator = order == null ? null : order.partiallyBuiltBucketComparator(b -> b.bucketOrd, this);
+ if (order != null) {
+ order.validate(this);
+ }
this.format = format;
if ((subAggsNeedScore() && descendsFromNestedAggregator(parent)) || context.isInSortOrderExecutionRequired()) {
/**
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java
index 2c7b768fcdbb..da5ae37b0822 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java
@@ -195,12 +195,12 @@ private static TermsAggregatorSupplier numericSupplier() {
if (includeExclude != null) {
longFilter = includeExclude.convertToDoubleFilter();
}
- resultStrategy = agg -> agg.new DoubleTermsResults(showTermDocCountError);
+ resultStrategy = agg -> agg.new DoubleTermsResults(showTermDocCountError, agg);
} else {
if (includeExclude != null) {
longFilter = includeExclude.convertToLongFilter(valuesSourceConfig.format());
}
- resultStrategy = agg -> agg.new LongTermsResults(showTermDocCountError);
+ resultStrategy = agg -> agg.new LongTermsResults(showTermDocCountError, agg);
}
return new NumericTermsAggregator(
name,
@@ -403,7 +403,7 @@ Aggregator create(
name,
factories,
new MapStringTermsAggregator.ValuesSourceCollectorSource(valuesSourceConfig),
- a -> a.new StandardTermsResults(valuesSourceConfig.getValuesSource()),
+ a -> a.new StandardTermsResults(valuesSourceConfig.getValuesSource(), a),
order,
valuesSourceConfig.format(),
bucketCountThresholds,
diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/InternalMultiTerms.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/InternalMultiTerms.java
index 0d42a2856a10..85510c8a989c 100644
--- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/InternalMultiTerms.java
+++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/InternalMultiTerms.java
@@ -37,9 +37,6 @@ public class InternalMultiTerms extends AbstractInternalTerms {
-
- long bucketOrd;
-
protected long docCount;
protected InternalAggregations aggregations;
private long docCountError;
diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregator.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregator.java
index 1691aedf543f..5c10e2c8feeb 100644
--- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregator.java
+++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregator.java
@@ -20,6 +20,7 @@
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.util.IntArray;
import org.elasticsearch.common.util.LongArray;
import org.elasticsearch.common.util.ObjectArray;
import org.elasticsearch.common.util.ObjectArrayPriorityQueue;
@@ -40,6 +41,7 @@
import org.elasticsearch.search.aggregations.LeafBucketCollector;
import org.elasticsearch.search.aggregations.LeafBucketCollectorBase;
import org.elasticsearch.search.aggregations.bucket.DeferableBucketAggregator;
+import org.elasticsearch.search.aggregations.bucket.terms.BucketAndOrd;
import org.elasticsearch.search.aggregations.bucket.terms.BucketPriorityQueue;
import org.elasticsearch.search.aggregations.bucket.terms.BytesKeyedBucketOrds;
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregator;
@@ -72,7 +74,7 @@ class MultiTermsAggregator extends DeferableBucketAggregator {
protected final List formats;
protected final TermsAggregator.BucketCountThresholds bucketCountThresholds;
protected final BucketOrder order;
- protected final Comparator partiallyBuiltBucketComparator;
+ protected final Comparator> partiallyBuiltBucketComparator;
protected final Set aggsUsedForSorting;
protected final SubAggCollectionMode collectMode;
private final List values;
@@ -99,7 +101,7 @@ protected MultiTermsAggregator(
super(name, factories, context, parent, metadata);
this.bucketCountThresholds = bucketCountThresholds;
this.order = order;
- partiallyBuiltBucketComparator = order == null ? null : order.partiallyBuiltBucketComparator(b -> b.bucketOrd, this);
+ partiallyBuiltBucketComparator = order == null ? null : order.partiallyBuiltBucketComparator(this);
this.formats = formats;
this.showTermDocCountError = showTermDocCountError;
if (subAggsNeedScore() && descendsFromNestedAggregator(parent) || context.isInSortOrderExecutionRequired()) {
@@ -242,52 +244,67 @@ public InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throw
LongArray otherDocCounts = bigArrays().newLongArray(owningBucketOrds.size(), true);
ObjectArray topBucketsPerOrd = bigArrays().newObjectArray(owningBucketOrds.size())
) {
- for (long ordIdx = 0; ordIdx < owningBucketOrds.size(); ordIdx++) {
- final long owningBucketOrd = owningBucketOrds.get(ordIdx);
- long bucketsInOrd = bucketOrds.bucketsInOrd(owningBucketOrd);
-
- int size = (int) Math.min(bucketsInOrd, bucketCountThresholds.getShardSize());
- try (
- ObjectArrayPriorityQueue ordered = new BucketPriorityQueue<>(
- size,
- bigArrays(),
- partiallyBuiltBucketComparator
- )
- ) {
- InternalMultiTerms.Bucket spare = null;
- BytesRef spareKey = null;
- BytesKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrd);
- while (ordsEnum.next()) {
- long docCount = bucketDocCount(ordsEnum.ord());
- otherDocCounts.increment(ordIdx, docCount);
- if (docCount < bucketCountThresholds.getShardMinDocCount()) {
- continue;
- }
- if (spare == null) {
- checkRealMemoryCBForInternalBucket();
- spare = new InternalMultiTerms.Bucket(null, 0, null, showTermDocCountError, 0, formats, keyConverters);
- spareKey = new BytesRef();
- }
- ordsEnum.readValue(spareKey);
- spare.terms = unpackTerms(spareKey);
- spare.docCount = docCount;
- spare.bucketOrd = ordsEnum.ord();
- spare = ordered.insertWithOverflow(spare);
- }
+ try (IntArray bucketsToCollect = bigArrays().newIntArray(owningBucketOrds.size())) {
+ long ordsToCollect = 0;
+ for (long ordIdx = 0; ordIdx < owningBucketOrds.size(); ordIdx++) {
+ int size = (int) Math.min(bucketOrds.bucketsInOrd(owningBucketOrds.get(ordIdx)), bucketCountThresholds.getShardSize());
+ ordsToCollect += size;
+ bucketsToCollect.set(ordIdx, size);
+ }
+ try (LongArray ordsArray = bigArrays().newLongArray(ordsToCollect)) {
+ long ordsCollected = 0;
+ for (long ordIdx = 0; ordIdx < owningBucketOrds.size(); ordIdx++) {
+ final long owningBucketOrd = owningBucketOrds.get(ordIdx);
+ long bucketsInOrd = bucketOrds.bucketsInOrd(owningBucketOrd);
+
+ int size = (int) Math.min(bucketsInOrd, bucketCountThresholds.getShardSize());
+ try (
+ ObjectArrayPriorityQueue> ordered = new BucketPriorityQueue<>(
+ size,
+ bigArrays(),
+ partiallyBuiltBucketComparator
+ )
+ ) {
+ BucketAndOrd spare = null;
+ BytesRef spareKey = null;
+ BytesKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrd);
+ while (ordsEnum.next()) {
+ long docCount = bucketDocCount(ordsEnum.ord());
+ otherDocCounts.increment(ordIdx, docCount);
+ if (docCount < bucketCountThresholds.getShardMinDocCount()) {
+ continue;
+ }
+ if (spare == null) {
+ checkRealMemoryCBForInternalBucket();
+ spare = new BucketAndOrd<>(
+ new InternalMultiTerms.Bucket(null, 0, null, showTermDocCountError, 0, formats, keyConverters)
+ );
+ spareKey = new BytesRef();
+ }
+ ordsEnum.readValue(spareKey);
+ spare.bucket.terms = unpackTerms(spareKey);
+ spare.bucket.docCount = docCount;
+ spare.ord = ordsEnum.ord();
+ spare = ordered.insertWithOverflow(spare);
+ }
- // Get the top buckets
- InternalMultiTerms.Bucket[] bucketsForOrd = new InternalMultiTerms.Bucket[(int) ordered.size()];
- topBucketsPerOrd.set(ordIdx, bucketsForOrd);
- for (int b = (int) ordered.size() - 1; b >= 0; --b) {
- InternalMultiTerms.Bucket[] buckets = topBucketsPerOrd.get(ordIdx);
- buckets[b] = ordered.pop();
- otherDocCounts.increment(ordIdx, -buckets[b].getDocCount());
+ // Get the top buckets
+ int orderedSize = (int) ordered.size();
+ InternalMultiTerms.Bucket[] buckets = new InternalMultiTerms.Bucket[orderedSize];
+ for (int i = orderedSize - 1; i >= 0; --i) {
+ BucketAndOrd bucketAndOrd = ordered.pop();
+ buckets[i] = bucketAndOrd.bucket;
+ ordsArray.set(ordsCollected + i, bucketAndOrd.ord);
+ otherDocCounts.increment(ordIdx, -buckets[i].getDocCount());
+ }
+ topBucketsPerOrd.set(ordIdx, buckets);
+ ordsCollected += orderedSize;
+ }
}
+ buildSubAggsForAllBuckets(topBucketsPerOrd, ordsArray, (b, a) -> b.aggregations = a);
}
}
- buildSubAggsForAllBuckets(topBucketsPerOrd, b -> b.bucketOrd, (b, a) -> b.aggregations = a);
-
return buildAggregations(
Math.toIntExact(owningBucketOrds.size()),
ordIdx -> buildResult(otherDocCounts.get(ordIdx), topBucketsPerOrd.get(ordIdx))
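The hunks above all follow the same pattern: buckets no longer carry their own `bucketOrd`; while the shard-level top-N is selected they are wrapped in `BucketAndOrd`, the winning ordinals are copied into a separate `ordsArray`, and that array is what `buildSubAggsForAllBuckets` consumes. A minimal sketch of the idea, assuming simplified types and plain Java arrays in place of BigArrays (these are not the actual Elasticsearch classes):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    class BucketAndOrdSketch {
        // Wrapper pairing a bucket with its ordinal, mirroring BucketAndOrd in the patch.
        static final class BucketAndOrd<B> {
            final B bucket;
            long ord;
            BucketAndOrd(B bucket) { this.bucket = bucket; }
        }

        record Bucket(String term, long docCount) {}

        // Select the top buckets by doc count and record their ordinals side by side,
        // the way the patch fills ordsArray before calling buildSubAggsForAllBuckets.
        static List<Bucket> selectTop(List<BucketAndOrd<Bucket>> candidates, long[] ordsArray) {
            candidates.sort((a, b) -> Long.compare(b.bucket.docCount(), a.bucket.docCount()));
            List<Bucket> top = new ArrayList<>();
            for (int i = 0; i < candidates.size() && i < ordsArray.length; i++) {
                BucketAndOrd<Bucket> c = candidates.get(i);
                top.add(c.bucket);      // the bucket itself no longer stores the ordinal
                ordsArray[i] = c.ord;   // ordinals are collected in a parallel array
            }
            return top;
        }

        public static void main(String[] args) {
            List<BucketAndOrd<Bucket>> candidates = new ArrayList<>();
            BucketAndOrd<Bucket> a = new BucketAndOrd<>(new Bucket("a", 10)); a.ord = 3;
            BucketAndOrd<Bucket> b = new BucketAndOrd<>(new Bucket("b", 7));  b.ord = 5;
            candidates.add(a);
            candidates.add(b);
            long[] ords = new long[2];
            System.out.println(selectTop(candidates, ords) + " ords=" + Arrays.toString(ords));
        }
    }

Collecting the ordinals once and handing them to the sub-aggregation builder lets it resolve sub-aggregations for all owning buckets from one array instead of reading a per-bucket field.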
From 1fecab19254715941f42bdebe025298e89d5574b Mon Sep 17 00:00:00 2001
From: Martijn van Groningen
Date: Thu, 5 Dec 2024 16:25:32 +0100
Subject: [PATCH 03/26] Update synthetic source cutoff date (#118069)
Updates the default cutoff date from 2025-02-01T00:00:00 UTC to 2025-02-04T00:00:00 UTC.
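The cutoff is stored as epoch milliseconds derived from a UTC `LocalDateTime`. A minimal sketch of the computation and of a comparison against a license start time (the `isPastCutoff` helper is illustrative only, not the actual service code):

    import java.time.LocalDateTime;
    import java.time.ZoneOffset;

    class CutoffDateSketch {
        // Same expression as DEFAULT_CUTOFF_DATE in the patch, now 2025-02-04T00:00:00 UTC.
        static final long DEFAULT_CUTOFF_DATE =
            LocalDateTime.of(2025, 2, 4, 0, 0).toInstant(ZoneOffset.UTC).toEpochMilli();

        // Illustrative comparison only; the real licensing decision lives in SyntheticSourceLicenseService.
        static boolean isPastCutoff(long licenseStartMillis) {
            return licenseStartMillis >= DEFAULT_CUTOFF_DATE;
        }

        public static void main(String[] args) {
            // One day after the default cutoff date, as used by the updated tests.
            long oneDayAfter = LocalDateTime.of(2025, 2, 5, 0, 0).toInstant(ZoneOffset.UTC).toEpochMilli();
            System.out.println(isPastCutoff(oneDayAfter)); // true
        }
    }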
---
.../xpack/logsdb/SyntheticSourceLicenseService.java | 2 +-
.../xpack/logsdb/LegacyLicenceIntegrationTests.java | 3 ++-
...SyntheticSourceIndexSettingsProviderLegacyLicenseTests.java | 2 +-
3 files changed, 4 insertions(+), 3 deletions(-)
diff --git a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/SyntheticSourceLicenseService.java b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/SyntheticSourceLicenseService.java
index 26a672fb1c90..e629f9b3998b 100644
--- a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/SyntheticSourceLicenseService.java
+++ b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/SyntheticSourceLicenseService.java
@@ -29,7 +29,7 @@ final class SyntheticSourceLicenseService {
// You can only override this property if you received explicit approval from Elastic.
static final String CUTOFF_DATE_SYS_PROP_NAME = "es.mapping.synthetic_source_fallback_to_stored_source.cutoff_date_restricted_override";
private static final Logger LOGGER = LogManager.getLogger(SyntheticSourceLicenseService.class);
- static final long DEFAULT_CUTOFF_DATE = LocalDateTime.of(2025, 2, 1, 0, 0).toInstant(ZoneOffset.UTC).toEpochMilli();
+ static final long DEFAULT_CUTOFF_DATE = LocalDateTime.of(2025, 2, 4, 0, 0).toInstant(ZoneOffset.UTC).toEpochMilli();
/**
* A setting that determines whether source mode should always be stored source. Regardless of licence.
diff --git a/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/LegacyLicenceIntegrationTests.java b/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/LegacyLicenceIntegrationTests.java
index 890bc464a257..f8f307b572f3 100644
--- a/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/LegacyLicenceIntegrationTests.java
+++ b/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/LegacyLicenceIntegrationTests.java
@@ -69,7 +69,8 @@ public void testSyntheticSourceUsageWithLegacyLicense() {
}
public void testSyntheticSourceUsageWithLegacyLicensePastCutoff() throws Exception {
- long startPastCutoff = LocalDateTime.of(2025, 11, 12, 0, 0).toInstant(ZoneOffset.UTC).toEpochMilli();
+ // One day after default cutoff date
+ long startPastCutoff = LocalDateTime.of(2025, 2, 5, 0, 0).toInstant(ZoneOffset.UTC).toEpochMilli();
putLicense(createGoldOrPlatinumLicense(startPastCutoff));
ensureGreen();
diff --git a/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProviderLegacyLicenseTests.java b/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProviderLegacyLicenseTests.java
index eda0d8786874..c871a7d0216e 100644
--- a/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProviderLegacyLicenseTests.java
+++ b/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProviderLegacyLicenseTests.java
@@ -98,7 +98,7 @@ public void testGetAdditionalIndexSettingsTsdb() throws IOException {
}
public void testGetAdditionalIndexSettingsTsdbAfterCutoffDate() throws Exception {
- long start = LocalDateTime.of(2025, 2, 2, 0, 0).toInstant(ZoneOffset.UTC).toEpochMilli();
+ long start = LocalDateTime.of(2025, 2, 5, 0, 0).toInstant(ZoneOffset.UTC).toEpochMilli();
License license = createGoldOrPlatinumLicense(start);
long time = LocalDateTime.of(2024, 12, 31, 0, 0).toInstant(ZoneOffset.UTC).toEpochMilli();
var licenseState = new XPackLicenseState(() -> time, new XPackLicenseStatus(license.operationMode(), true, null));
From 9d350537608e89624b660ff40f8b96275d8ba9d9 Mon Sep 17 00:00:00 2001
From: kosabogi <105062005+kosabogi@users.noreply.github.com>
Date: Thu, 5 Dec 2024 16:32:59 +0100
Subject: [PATCH 04/26] Adds warning to Create inference API page (#118073)
---
docs/reference/inference/put-inference.asciidoc | 9 ++++++++-
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/docs/reference/inference/put-inference.asciidoc b/docs/reference/inference/put-inference.asciidoc
index ed93c290b6ad..4f82889f562d 100644
--- a/docs/reference/inference/put-inference.asciidoc
+++ b/docs/reference/inference/put-inference.asciidoc
@@ -10,7 +10,6 @@ Creates an {infer} endpoint to perform an {infer} task.
* For built-in models and models uploaded through Eland, the {infer} APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the {infer} APIs to use these models or if you want to use non-NLP models, use the <>.
====
-
[discrete]
[[put-inference-api-request]]
==== {api-request-title}
@@ -47,6 +46,14 @@ Refer to the service list in the <> API. In the response, look for `"state": "fully_allocated"` and ensure the `"allocation_count"` matches the `"target_allocation_count"`.
+* Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
+====
+
+
The following services are available through the {infer} API.
You can find the available task types next to the service name.
Click the links to review the configuration details of the services:
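The allocation fields the new warning refers to are reported by the Get trained models statistics API as part of the deployment's allocation status; an illustrative excerpt of the relevant portion of a response (values are examples only):

    "allocation_status": {
        "allocation_count": 2,
        "target_allocation_count": 2,
        "state": "fully_allocated"
    }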
From 5d1bca34f9dbfd3904c624a0f48a474e557577e5 Mon Sep 17 00:00:00 2001
From: Armin Braun
Date: Thu, 5 Dec 2024 17:22:03 +0100
Subject: [PATCH 05/26] Make NestedHelper a utility class (#118071)
Noticed that instantiating these helper instances takes a visible and unexpected
amount of CPU in profiles (probably from bootstrapping the lambda/call site for the
predicate). With this change the logic effectively disappears from profiling, and it
becomes easier to reason about as well, because the search context is passed around
explicitly instead of being used indirectly through a captured predicate.
There is also no need to instantiate instances of this thing at all; escape analysis
probably cannot remove the allocation because of the recursive instance
method calls.
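The shape of the change, as a minimal sketch with hypothetical names (not the actual Elasticsearch classes): the per-query helper object that captured a predicate is replaced by static methods that take the context as an explicit argument, so nothing has to be allocated or bootstrapped per query.

    import java.util.function.Predicate;

    class NestedHelperRefactorSketch {
        // Before (sketch): a helper instance created for every query, capturing a lambda.
        static final class InstanceHelper {
            private final Predicate<String> isMappedField;
            InstanceHelper(Predicate<String> isMappedField) { this.isMappedField = isMappedField; }
            boolean mightMatchNestedDocs(String field) { return isMappedField.test(field); }
        }

        // After (sketch): a static utility; the context is passed explicitly instead.
        interface Context { boolean isFieldMapped(String field); }
        static boolean mightMatchNestedDocs(String field, Context context) {
            return context.isFieldMapped(field);
        }

        public static void main(String[] args) {
            Context ctx = "foo"::equals;
            System.out.println(new InstanceHelper(ctx::isFieldMapped).mightMatchNestedDocs("foo")); // old style
            System.out.println(mightMatchNestedDocs("foo", ctx));                                   // new style
        }
    }

Because the static methods recurse on themselves rather than on an instance, there is no short-lived object left for escape analysis to eliminate in the first place.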
---
.../index/query/NestedQueryBuilder.java | 3 +-
.../index/search/NestedHelper.java | 59 ++--
.../search/DefaultSearchContext.java | 5 +-
.../search/vectors/KnnVectorQueryBuilder.java | 3 +-
.../index/search/NestedHelperTests.java | 274 ++++++++++--------
.../authz/permission/DocumentPermissions.java | 6 +-
.../planner/EsPhysicalOperationProviders.java | 14 +-
7 files changed, 185 insertions(+), 179 deletions(-)
diff --git a/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java
index 83bca7d27aee..503b2adf756f 100644
--- a/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java
+++ b/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java
@@ -321,8 +321,7 @@ public static Query toQuery(
// ToParentBlockJoinQuery requires that the inner query only matches documents
// in its child space
- NestedHelper nestedHelper = new NestedHelper(context.nestedLookup(), context::isFieldMapped);
- if (nestedHelper.mightMatchNonNestedDocs(innerQuery, path)) {
+ if (NestedHelper.mightMatchNonNestedDocs(innerQuery, path, context)) {
innerQuery = Queries.filtered(innerQuery, mapper.nestedTypeFilter());
}
diff --git a/server/src/main/java/org/elasticsearch/index/search/NestedHelper.java b/server/src/main/java/org/elasticsearch/index/search/NestedHelper.java
index 96e8ac35c8e3..a04f930e052b 100644
--- a/server/src/main/java/org/elasticsearch/index/search/NestedHelper.java
+++ b/server/src/main/java/org/elasticsearch/index/search/NestedHelper.java
@@ -21,29 +21,21 @@
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermInSetQuery;
import org.apache.lucene.search.TermQuery;
-import org.elasticsearch.index.mapper.NestedLookup;
import org.elasticsearch.index.mapper.NestedObjectMapper;
-
-import java.util.function.Predicate;
+import org.elasticsearch.index.query.SearchExecutionContext;
/** Utility class to filter parent and children clauses when building nested
* queries. */
public final class NestedHelper {
- private final NestedLookup nestedLookup;
- private final Predicate isMappedFieldPredicate;
-
- public NestedHelper(NestedLookup nestedLookup, Predicate isMappedFieldPredicate) {
- this.nestedLookup = nestedLookup;
- this.isMappedFieldPredicate = isMappedFieldPredicate;
- }
+ private NestedHelper() {}
/** Returns true if the given query might match nested documents. */
- public boolean mightMatchNestedDocs(Query query) {
+ public static boolean mightMatchNestedDocs(Query query, SearchExecutionContext searchExecutionContext) {
if (query instanceof ConstantScoreQuery) {
- return mightMatchNestedDocs(((ConstantScoreQuery) query).getQuery());
+ return mightMatchNestedDocs(((ConstantScoreQuery) query).getQuery(), searchExecutionContext);
} else if (query instanceof BoostQuery) {
- return mightMatchNestedDocs(((BoostQuery) query).getQuery());
+ return mightMatchNestedDocs(((BoostQuery) query).getQuery(), searchExecutionContext);
} else if (query instanceof MatchAllDocsQuery) {
return true;
} else if (query instanceof MatchNoDocsQuery) {
@@ -51,17 +43,17 @@ public boolean mightMatchNestedDocs(Query query) {
} else if (query instanceof TermQuery) {
// We only handle term(s) queries and range queries, which should already
// cover a high majority of use-cases
- return mightMatchNestedDocs(((TermQuery) query).getTerm().field());
+ return mightMatchNestedDocs(((TermQuery) query).getTerm().field(), searchExecutionContext);
} else if (query instanceof TermInSetQuery tis) {
if (tis.getTermsCount() > 0) {
- return mightMatchNestedDocs(tis.getField());
+ return mightMatchNestedDocs(tis.getField(), searchExecutionContext);
} else {
return false;
}
} else if (query instanceof PointRangeQuery) {
- return mightMatchNestedDocs(((PointRangeQuery) query).getField());
+ return mightMatchNestedDocs(((PointRangeQuery) query).getField(), searchExecutionContext);
} else if (query instanceof IndexOrDocValuesQuery) {
- return mightMatchNestedDocs(((IndexOrDocValuesQuery) query).getIndexQuery());
+ return mightMatchNestedDocs(((IndexOrDocValuesQuery) query).getIndexQuery(), searchExecutionContext);
} else if (query instanceof final BooleanQuery bq) {
final boolean hasRequiredClauses = bq.clauses().stream().anyMatch(BooleanClause::isRequired);
if (hasRequiredClauses) {
@@ -69,13 +61,13 @@ public boolean mightMatchNestedDocs(Query query) {
.stream()
.filter(BooleanClause::isRequired)
.map(BooleanClause::query)
- .allMatch(this::mightMatchNestedDocs);
+ .allMatch(f -> mightMatchNestedDocs(f, searchExecutionContext));
} else {
return bq.clauses()
.stream()
.filter(c -> c.occur() == Occur.SHOULD)
.map(BooleanClause::query)
- .anyMatch(this::mightMatchNestedDocs);
+ .anyMatch(f -> mightMatchNestedDocs(f, searchExecutionContext));
}
} else if (query instanceof ESToParentBlockJoinQuery) {
return ((ESToParentBlockJoinQuery) query).getPath() != null;
@@ -85,7 +77,7 @@ public boolean mightMatchNestedDocs(Query query) {
}
/** Returns true if a query on the given field might match nested documents. */
- boolean mightMatchNestedDocs(String field) {
+ private static boolean mightMatchNestedDocs(String field, SearchExecutionContext searchExecutionContext) {
if (field.startsWith("_")) {
// meta field. Every meta field behaves differently, eg. nested
// documents have the same _uid as their parent, put their path in
@@ -94,36 +86,36 @@ boolean mightMatchNestedDocs(String field) {
// we might add a nested filter when it is not required.
return true;
}
- if (isMappedFieldPredicate.test(field) == false) {
+ if (searchExecutionContext.isFieldMapped(field) == false) {
// field does not exist
return false;
}
- return nestedLookup.getNestedParent(field) != null;
+ return searchExecutionContext.nestedLookup().getNestedParent(field) != null;
}
/** Returns true if the given query might match parent documents or documents
* that are nested under a different path. */
- public boolean mightMatchNonNestedDocs(Query query, String nestedPath) {
+ public static boolean mightMatchNonNestedDocs(Query query, String nestedPath, SearchExecutionContext searchExecutionContext) {
if (query instanceof ConstantScoreQuery) {
- return mightMatchNonNestedDocs(((ConstantScoreQuery) query).getQuery(), nestedPath);
+ return mightMatchNonNestedDocs(((ConstantScoreQuery) query).getQuery(), nestedPath, searchExecutionContext);
} else if (query instanceof BoostQuery) {
- return mightMatchNonNestedDocs(((BoostQuery) query).getQuery(), nestedPath);
+ return mightMatchNonNestedDocs(((BoostQuery) query).getQuery(), nestedPath, searchExecutionContext);
} else if (query instanceof MatchAllDocsQuery) {
return true;
} else if (query instanceof MatchNoDocsQuery) {
return false;
} else if (query instanceof TermQuery) {
- return mightMatchNonNestedDocs(((TermQuery) query).getTerm().field(), nestedPath);
+ return mightMatchNonNestedDocs(searchExecutionContext, ((TermQuery) query).getTerm().field(), nestedPath);
} else if (query instanceof TermInSetQuery tis) {
if (tis.getTermsCount() > 0) {
- return mightMatchNonNestedDocs(tis.getField(), nestedPath);
+ return mightMatchNonNestedDocs(searchExecutionContext, tis.getField(), nestedPath);
} else {
return false;
}
} else if (query instanceof PointRangeQuery) {
- return mightMatchNonNestedDocs(((PointRangeQuery) query).getField(), nestedPath);
+ return mightMatchNonNestedDocs(searchExecutionContext, ((PointRangeQuery) query).getField(), nestedPath);
} else if (query instanceof IndexOrDocValuesQuery) {
- return mightMatchNonNestedDocs(((IndexOrDocValuesQuery) query).getIndexQuery(), nestedPath);
+ return mightMatchNonNestedDocs(((IndexOrDocValuesQuery) query).getIndexQuery(), nestedPath, searchExecutionContext);
} else if (query instanceof final BooleanQuery bq) {
final boolean hasRequiredClauses = bq.clauses().stream().anyMatch(BooleanClause::isRequired);
if (hasRequiredClauses) {
@@ -131,13 +123,13 @@ public boolean mightMatchNonNestedDocs(Query query, String nestedPath) {
.stream()
.filter(BooleanClause::isRequired)
.map(BooleanClause::query)
- .allMatch(q -> mightMatchNonNestedDocs(q, nestedPath));
+ .allMatch(q -> mightMatchNonNestedDocs(q, nestedPath, searchExecutionContext));
} else {
return bq.clauses()
.stream()
.filter(c -> c.occur() == Occur.SHOULD)
.map(BooleanClause::query)
- .anyMatch(q -> mightMatchNonNestedDocs(q, nestedPath));
+ .anyMatch(q -> mightMatchNonNestedDocs(q, nestedPath, searchExecutionContext));
}
} else {
return true;
@@ -146,7 +138,7 @@ public boolean mightMatchNonNestedDocs(Query query, String nestedPath) {
/** Returns true if a query on the given field might match parent documents
* or documents that are nested under a different path. */
- boolean mightMatchNonNestedDocs(String field, String nestedPath) {
+ private static boolean mightMatchNonNestedDocs(SearchExecutionContext searchExecutionContext, String field, String nestedPath) {
if (field.startsWith("_")) {
// meta field. Every meta field behaves differently, eg. nested
// documents have the same _uid as their parent, put their path in
@@ -155,9 +147,10 @@ boolean mightMatchNonNestedDocs(String field, String nestedPath) {
// we might add a nested filter when it is not required.
return true;
}
- if (isMappedFieldPredicate.test(field) == false) {
+ if (searchExecutionContext.isFieldMapped(field) == false) {
return false;
}
+ var nestedLookup = searchExecutionContext.nestedLookup();
String nestedParent = nestedLookup.getNestedParent(field);
if (nestedParent == null || nestedParent.startsWith(nestedPath) == false) {
// the field is not a sub field of the nested path
diff --git a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java
index 8ac35f7c40ca..b87d097413b6 100644
--- a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java
+++ b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java
@@ -444,10 +444,9 @@ public void preProcess() {
public Query buildFilteredQuery(Query query) {
List filters = new ArrayList<>();
NestedLookup nestedLookup = searchExecutionContext.nestedLookup();
- NestedHelper nestedHelper = new NestedHelper(nestedLookup, searchExecutionContext::isFieldMapped);
if (nestedLookup != NestedLookup.EMPTY
- && nestedHelper.mightMatchNestedDocs(query)
- && (aliasFilter == null || nestedHelper.mightMatchNestedDocs(aliasFilter))) {
+ && NestedHelper.mightMatchNestedDocs(query, searchExecutionContext)
+ && (aliasFilter == null || NestedHelper.mightMatchNestedDocs(aliasFilter, searchExecutionContext))) {
filters.add(Queries.newNonNestedFilter(searchExecutionContext.indexVersionCreated()));
}
diff --git a/server/src/main/java/org/elasticsearch/search/vectors/KnnVectorQueryBuilder.java b/server/src/main/java/org/elasticsearch/search/vectors/KnnVectorQueryBuilder.java
index deb7e6bd035b..5dd2cbf32dd1 100644
--- a/server/src/main/java/org/elasticsearch/search/vectors/KnnVectorQueryBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/vectors/KnnVectorQueryBuilder.java
@@ -481,10 +481,9 @@ protected Query doToQuery(SearchExecutionContext context) throws IOException {
}
parentBitSet = context.bitsetFilter(parentFilter);
if (filterQuery != null) {
- NestedHelper nestedHelper = new NestedHelper(context.nestedLookup(), context::isFieldMapped);
// We treat the provided filter as a filter over PARENT documents, so if it might match nested documents
// we need to adjust it.
- if (nestedHelper.mightMatchNestedDocs(filterQuery)) {
+ if (NestedHelper.mightMatchNestedDocs(filterQuery, context)) {
// Ensure that the query only returns parent documents matching `filterQuery`
filterQuery = Queries.filtered(filterQuery, parentFilter);
}
diff --git a/server/src/test/java/org/elasticsearch/index/search/NestedHelperTests.java b/server/src/test/java/org/elasticsearch/index/search/NestedHelperTests.java
index a7a1d33badf2..b2583eb176de 100644
--- a/server/src/test/java/org/elasticsearch/index/search/NestedHelperTests.java
+++ b/server/src/test/java/org/elasticsearch/index/search/NestedHelperTests.java
@@ -17,6 +17,7 @@
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.join.ScoreMode;
+import org.elasticsearch.index.mapper.MapperMetrics;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.MapperServiceTestCase;
import org.elasticsearch.index.query.MatchAllQueryBuilder;
@@ -27,12 +28,15 @@
import java.io.IOException;
import java.util.Collections;
+import static java.util.Collections.emptyMap;
import static org.mockito.Mockito.mock;
public class NestedHelperTests extends MapperServiceTestCase {
MapperService mapperService;
+ SearchExecutionContext searchExecutionContext;
+
@Override
public void setUp() throws Exception {
super.setUp();
@@ -68,167 +72,185 @@ public void setUp() throws Exception {
} }
""";
mapperService = createMapperService(mapping);
- }
-
- private static NestedHelper buildNestedHelper(MapperService mapperService) {
- return new NestedHelper(mapperService.mappingLookup().nestedLookup(), field -> mapperService.fieldType(field) != null);
+ searchExecutionContext = new SearchExecutionContext(
+ 0,
+ 0,
+ mapperService.getIndexSettings(),
+ null,
+ null,
+ mapperService,
+ mapperService.mappingLookup(),
+ null,
+ null,
+ parserConfig(),
+ writableRegistry(),
+ null,
+ null,
+ System::currentTimeMillis,
+ null,
+ null,
+ () -> true,
+ null,
+ emptyMap(),
+ MapperMetrics.NOOP
+ );
}
public void testMatchAll() {
- assertTrue(buildNestedHelper(mapperService).mightMatchNestedDocs(new MatchAllDocsQuery()));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(new MatchAllDocsQuery(), "nested1"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(new MatchAllDocsQuery(), "nested2"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(new MatchAllDocsQuery(), "nested3"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(new MatchAllDocsQuery(), "nested_missing"));
+ assertTrue(NestedHelper.mightMatchNestedDocs(new MatchAllDocsQuery(), searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(new MatchAllDocsQuery(), "nested1", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(new MatchAllDocsQuery(), "nested2", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(new MatchAllDocsQuery(), "nested3", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(new MatchAllDocsQuery(), "nested_missing", searchExecutionContext));
}
public void testMatchNo() {
- assertFalse(buildNestedHelper(mapperService).mightMatchNestedDocs(new MatchNoDocsQuery()));
- assertFalse(buildNestedHelper(mapperService).mightMatchNonNestedDocs(new MatchNoDocsQuery(), "nested1"));
- assertFalse(buildNestedHelper(mapperService).mightMatchNonNestedDocs(new MatchNoDocsQuery(), "nested2"));
- assertFalse(buildNestedHelper(mapperService).mightMatchNonNestedDocs(new MatchNoDocsQuery(), "nested3"));
- assertFalse(buildNestedHelper(mapperService).mightMatchNonNestedDocs(new MatchNoDocsQuery(), "nested_missing"));
+ assertFalse(NestedHelper.mightMatchNestedDocs(new MatchNoDocsQuery(), searchExecutionContext));
+ assertFalse(NestedHelper.mightMatchNonNestedDocs(new MatchNoDocsQuery(), "nested1", searchExecutionContext));
+ assertFalse(NestedHelper.mightMatchNonNestedDocs(new MatchNoDocsQuery(), "nested2", searchExecutionContext));
+ assertFalse(NestedHelper.mightMatchNonNestedDocs(new MatchNoDocsQuery(), "nested3", searchExecutionContext));
+ assertFalse(NestedHelper.mightMatchNonNestedDocs(new MatchNoDocsQuery(), "nested_missing", searchExecutionContext));
}
public void testTermsQuery() {
Query termsQuery = mapperService.fieldType("foo").termsQuery(Collections.singletonList("bar"), null);
- assertFalse(buildNestedHelper(mapperService).mightMatchNestedDocs(termsQuery));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termsQuery, "nested1"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termsQuery, "nested2"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termsQuery, "nested3"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termsQuery, "nested_missing"));
+ assertFalse(NestedHelper.mightMatchNestedDocs(termsQuery, searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termsQuery, "nested1", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termsQuery, "nested2", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termsQuery, "nested3", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termsQuery, "nested_missing", searchExecutionContext));
termsQuery = mapperService.fieldType("nested1.foo").termsQuery(Collections.singletonList("bar"), null);
- assertTrue(buildNestedHelper(mapperService).mightMatchNestedDocs(termsQuery));
- assertFalse(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termsQuery, "nested1"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termsQuery, "nested2"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termsQuery, "nested3"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termsQuery, "nested_missing"));
+ assertTrue(NestedHelper.mightMatchNestedDocs(termsQuery, searchExecutionContext));
+ assertFalse(NestedHelper.mightMatchNonNestedDocs(termsQuery, "nested1", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termsQuery, "nested2", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termsQuery, "nested3", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termsQuery, "nested_missing", searchExecutionContext));
termsQuery = mapperService.fieldType("nested2.foo").termsQuery(Collections.singletonList("bar"), null);
- assertTrue(buildNestedHelper(mapperService).mightMatchNestedDocs(termsQuery));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termsQuery, "nested1"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termsQuery, "nested2"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termsQuery, "nested3"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termsQuery, "nested_missing"));
+ assertTrue(NestedHelper.mightMatchNestedDocs(termsQuery, searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termsQuery, "nested1", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termsQuery, "nested2", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termsQuery, "nested3", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termsQuery, "nested_missing", searchExecutionContext));
termsQuery = mapperService.fieldType("nested3.foo").termsQuery(Collections.singletonList("bar"), null);
- assertTrue(buildNestedHelper(mapperService).mightMatchNestedDocs(termsQuery));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termsQuery, "nested1"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termsQuery, "nested2"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termsQuery, "nested3"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termsQuery, "nested_missing"));
+ assertTrue(NestedHelper.mightMatchNestedDocs(termsQuery, searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termsQuery, "nested1", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termsQuery, "nested2", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termsQuery, "nested3", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termsQuery, "nested_missing", searchExecutionContext));
}
public void testTermQuery() {
Query termQuery = mapperService.fieldType("foo").termQuery("bar", null);
- assertFalse(buildNestedHelper(mapperService).mightMatchNestedDocs(termQuery));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termQuery, "nested1"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termQuery, "nested2"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termQuery, "nested3"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termQuery, "nested_missing"));
+ assertFalse(NestedHelper.mightMatchNestedDocs(termQuery, searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termQuery, "nested1", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termQuery, "nested2", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termQuery, "nested3", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termQuery, "nested_missing", searchExecutionContext));
termQuery = mapperService.fieldType("nested1.foo").termQuery("bar", null);
- assertTrue(buildNestedHelper(mapperService).mightMatchNestedDocs(termQuery));
- assertFalse(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termQuery, "nested1"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termQuery, "nested2"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termQuery, "nested3"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termQuery, "nested_missing"));
+ assertTrue(NestedHelper.mightMatchNestedDocs(termQuery, searchExecutionContext));
+ assertFalse(NestedHelper.mightMatchNonNestedDocs(termQuery, "nested1", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termQuery, "nested2", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termQuery, "nested3", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termQuery, "nested_missing", searchExecutionContext));
termQuery = mapperService.fieldType("nested2.foo").termQuery("bar", null);
- assertTrue(buildNestedHelper(mapperService).mightMatchNestedDocs(termQuery));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termQuery, "nested1"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termQuery, "nested2"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termQuery, "nested3"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termQuery, "nested_missing"));
+ assertTrue(NestedHelper.mightMatchNestedDocs(termQuery, searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termQuery, "nested1", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termQuery, "nested2", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termQuery, "nested3", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termQuery, "nested_missing", searchExecutionContext));
termQuery = mapperService.fieldType("nested3.foo").termQuery("bar", null);
- assertTrue(buildNestedHelper(mapperService).mightMatchNestedDocs(termQuery));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termQuery, "nested1"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termQuery, "nested2"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termQuery, "nested3"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termQuery, "nested_missing"));
+ assertTrue(NestedHelper.mightMatchNestedDocs(termQuery, searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termQuery, "nested1", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termQuery, "nested2", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termQuery, "nested3", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termQuery, "nested_missing", searchExecutionContext));
}
public void testRangeQuery() {
SearchExecutionContext context = mock(SearchExecutionContext.class);
Query rangeQuery = mapperService.fieldType("foo2").rangeQuery(2, 5, true, true, null, null, null, context);
- assertFalse(buildNestedHelper(mapperService).mightMatchNestedDocs(rangeQuery));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(rangeQuery, "nested1"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(rangeQuery, "nested2"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(rangeQuery, "nested3"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(rangeQuery, "nested_missing"));
+ assertFalse(NestedHelper.mightMatchNestedDocs(rangeQuery, searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(rangeQuery, "nested1", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(rangeQuery, "nested2", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(rangeQuery, "nested3", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(rangeQuery, "nested_missing", searchExecutionContext));
rangeQuery = mapperService.fieldType("nested1.foo2").rangeQuery(2, 5, true, true, null, null, null, context);
- assertTrue(buildNestedHelper(mapperService).mightMatchNestedDocs(rangeQuery));
- assertFalse(buildNestedHelper(mapperService).mightMatchNonNestedDocs(rangeQuery, "nested1"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(rangeQuery, "nested2"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(rangeQuery, "nested3"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(rangeQuery, "nested_missing"));
+ assertTrue(NestedHelper.mightMatchNestedDocs(rangeQuery, searchExecutionContext));
+ assertFalse(NestedHelper.mightMatchNonNestedDocs(rangeQuery, "nested1", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(rangeQuery, "nested2", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(rangeQuery, "nested3", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(rangeQuery, "nested_missing", searchExecutionContext));
rangeQuery = mapperService.fieldType("nested2.foo2").rangeQuery(2, 5, true, true, null, null, null, context);
- assertTrue(buildNestedHelper(mapperService).mightMatchNestedDocs(rangeQuery));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(rangeQuery, "nested1"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(rangeQuery, "nested2"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(rangeQuery, "nested3"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(rangeQuery, "nested_missing"));
+ assertTrue(NestedHelper.mightMatchNestedDocs(rangeQuery, searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(rangeQuery, "nested1", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(rangeQuery, "nested2", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(rangeQuery, "nested3", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(rangeQuery, "nested_missing", searchExecutionContext));
rangeQuery = mapperService.fieldType("nested3.foo2").rangeQuery(2, 5, true, true, null, null, null, context);
- assertTrue(buildNestedHelper(mapperService).mightMatchNestedDocs(rangeQuery));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(rangeQuery, "nested1"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(rangeQuery, "nested2"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(rangeQuery, "nested3"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(rangeQuery, "nested_missing"));
+ assertTrue(NestedHelper.mightMatchNestedDocs(rangeQuery, searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(rangeQuery, "nested1", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(rangeQuery, "nested2", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(rangeQuery, "nested3", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(rangeQuery, "nested_missing", searchExecutionContext));
}
public void testDisjunction() {
BooleanQuery bq = new BooleanQuery.Builder().add(new TermQuery(new Term("foo", "bar")), Occur.SHOULD)
.add(new TermQuery(new Term("foo", "baz")), Occur.SHOULD)
.build();
- assertFalse(buildNestedHelper(mapperService).mightMatchNestedDocs(bq));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(bq, "nested1"));
+ assertFalse(NestedHelper.mightMatchNestedDocs(bq, searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(bq, "nested1", searchExecutionContext));
bq = new BooleanQuery.Builder().add(new TermQuery(new Term("nested1.foo", "bar")), Occur.SHOULD)
.add(new TermQuery(new Term("nested1.foo", "baz")), Occur.SHOULD)
.build();
- assertTrue(buildNestedHelper(mapperService).mightMatchNestedDocs(bq));
- assertFalse(buildNestedHelper(mapperService).mightMatchNonNestedDocs(bq, "nested1"));
+ assertTrue(NestedHelper.mightMatchNestedDocs(bq, searchExecutionContext));
+ assertFalse(NestedHelper.mightMatchNonNestedDocs(bq, "nested1", searchExecutionContext));
bq = new BooleanQuery.Builder().add(new TermQuery(new Term("nested2.foo", "bar")), Occur.SHOULD)
.add(new TermQuery(new Term("nested2.foo", "baz")), Occur.SHOULD)
.build();
- assertTrue(buildNestedHelper(mapperService).mightMatchNestedDocs(bq));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(bq, "nested2"));
+ assertTrue(NestedHelper.mightMatchNestedDocs(bq, searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(bq, "nested2", searchExecutionContext));
bq = new BooleanQuery.Builder().add(new TermQuery(new Term("nested3.foo", "bar")), Occur.SHOULD)
.add(new TermQuery(new Term("nested3.foo", "baz")), Occur.SHOULD)
.build();
- assertTrue(buildNestedHelper(mapperService).mightMatchNestedDocs(bq));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(bq, "nested3"));
+ assertTrue(NestedHelper.mightMatchNestedDocs(bq, searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(bq, "nested3", searchExecutionContext));
bq = new BooleanQuery.Builder().add(new TermQuery(new Term("foo", "bar")), Occur.SHOULD)
.add(new MatchAllDocsQuery(), Occur.SHOULD)
.build();
- assertTrue(buildNestedHelper(mapperService).mightMatchNestedDocs(bq));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(bq, "nested1"));
+ assertTrue(NestedHelper.mightMatchNestedDocs(bq, searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(bq, "nested1", searchExecutionContext));
bq = new BooleanQuery.Builder().add(new TermQuery(new Term("nested1.foo", "bar")), Occur.SHOULD)
.add(new MatchAllDocsQuery(), Occur.SHOULD)
.build();
- assertTrue(buildNestedHelper(mapperService).mightMatchNestedDocs(bq));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(bq, "nested1"));
+ assertTrue(NestedHelper.mightMatchNestedDocs(bq, searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(bq, "nested1", searchExecutionContext));
bq = new BooleanQuery.Builder().add(new TermQuery(new Term("nested2.foo", "bar")), Occur.SHOULD)
.add(new MatchAllDocsQuery(), Occur.SHOULD)
.build();
- assertTrue(buildNestedHelper(mapperService).mightMatchNestedDocs(bq));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(bq, "nested2"));
+ assertTrue(NestedHelper.mightMatchNestedDocs(bq, searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(bq, "nested2", searchExecutionContext));
bq = new BooleanQuery.Builder().add(new TermQuery(new Term("nested3.foo", "bar")), Occur.SHOULD)
.add(new MatchAllDocsQuery(), Occur.SHOULD)
.build();
- assertTrue(buildNestedHelper(mapperService).mightMatchNestedDocs(bq));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(bq, "nested3"));
+ assertTrue(NestedHelper.mightMatchNestedDocs(bq, searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(bq, "nested3", searchExecutionContext));
}
private static Occur requiredOccur() {
@@ -239,42 +261,42 @@ public void testConjunction() {
BooleanQuery bq = new BooleanQuery.Builder().add(new TermQuery(new Term("foo", "bar")), requiredOccur())
.add(new MatchAllDocsQuery(), requiredOccur())
.build();
- assertFalse(buildNestedHelper(mapperService).mightMatchNestedDocs(bq));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(bq, "nested1"));
+ assertFalse(NestedHelper.mightMatchNestedDocs(bq, searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(bq, "nested1", searchExecutionContext));
bq = new BooleanQuery.Builder().add(new TermQuery(new Term("nested1.foo", "bar")), requiredOccur())
.add(new MatchAllDocsQuery(), requiredOccur())
.build();
- assertTrue(buildNestedHelper(mapperService).mightMatchNestedDocs(bq));
- assertFalse(buildNestedHelper(mapperService).mightMatchNonNestedDocs(bq, "nested1"));
+ assertTrue(NestedHelper.mightMatchNestedDocs(bq, searchExecutionContext));
+ assertFalse(NestedHelper.mightMatchNonNestedDocs(bq, "nested1", searchExecutionContext));
bq = new BooleanQuery.Builder().add(new TermQuery(new Term("nested2.foo", "bar")), requiredOccur())
.add(new MatchAllDocsQuery(), requiredOccur())
.build();
- assertTrue(buildNestedHelper(mapperService).mightMatchNestedDocs(bq));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(bq, "nested2"));
+ assertTrue(NestedHelper.mightMatchNestedDocs(bq, searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(bq, "nested2", searchExecutionContext));
bq = new BooleanQuery.Builder().add(new TermQuery(new Term("nested3.foo", "bar")), requiredOccur())
.add(new MatchAllDocsQuery(), requiredOccur())
.build();
- assertTrue(buildNestedHelper(mapperService).mightMatchNestedDocs(bq));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(bq, "nested3"));
+ assertTrue(NestedHelper.mightMatchNestedDocs(bq, searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(bq, "nested3", searchExecutionContext));
bq = new BooleanQuery.Builder().add(new MatchAllDocsQuery(), requiredOccur()).add(new MatchAllDocsQuery(), requiredOccur()).build();
- assertTrue(buildNestedHelper(mapperService).mightMatchNestedDocs(bq));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(bq, "nested1"));
+ assertTrue(NestedHelper.mightMatchNestedDocs(bq, searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(bq, "nested1", searchExecutionContext));
bq = new BooleanQuery.Builder().add(new MatchAllDocsQuery(), requiredOccur()).add(new MatchAllDocsQuery(), requiredOccur()).build();
- assertTrue(buildNestedHelper(mapperService).mightMatchNestedDocs(bq));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(bq, "nested1"));
+ assertTrue(NestedHelper.mightMatchNestedDocs(bq, searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(bq, "nested1", searchExecutionContext));
bq = new BooleanQuery.Builder().add(new MatchAllDocsQuery(), requiredOccur()).add(new MatchAllDocsQuery(), requiredOccur()).build();
- assertTrue(buildNestedHelper(mapperService).mightMatchNestedDocs(bq));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(bq, "nested2"));
+ assertTrue(NestedHelper.mightMatchNestedDocs(bq, searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(bq, "nested2", searchExecutionContext));
bq = new BooleanQuery.Builder().add(new MatchAllDocsQuery(), requiredOccur()).add(new MatchAllDocsQuery(), requiredOccur()).build();
- assertTrue(buildNestedHelper(mapperService).mightMatchNestedDocs(bq));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(bq, "nested3"));
+ assertTrue(NestedHelper.mightMatchNestedDocs(bq, searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(bq, "nested3", searchExecutionContext));
}
public void testNested() throws IOException {
@@ -288,11 +310,11 @@ public void testNested() throws IOException {
.build();
assertEquals(expectedChildQuery, query.getChildQuery());
- assertFalse(buildNestedHelper(mapperService).mightMatchNestedDocs(query));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(query, "nested1"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(query, "nested2"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(query, "nested3"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(query, "nested_missing"));
+ assertFalse(NestedHelper.mightMatchNestedDocs(query, searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(query, "nested1", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(query, "nested2", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(query, "nested3", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(query, "nested_missing", searchExecutionContext));
queryBuilder = new NestedQueryBuilder("nested1", new TermQueryBuilder("nested1.foo", "bar"), ScoreMode.Avg);
query = (ESToParentBlockJoinQuery) queryBuilder.toQuery(context);
@@ -301,11 +323,11 @@ public void testNested() throws IOException {
expectedChildQuery = new TermQuery(new Term("nested1.foo", "bar"));
assertEquals(expectedChildQuery, query.getChildQuery());
- assertFalse(buildNestedHelper(mapperService).mightMatchNestedDocs(query));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(query, "nested1"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(query, "nested2"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(query, "nested3"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(query, "nested_missing"));
+ assertFalse(NestedHelper.mightMatchNestedDocs(query, searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(query, "nested1", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(query, "nested2", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(query, "nested3", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(query, "nested_missing", searchExecutionContext));
queryBuilder = new NestedQueryBuilder("nested2", new TermQueryBuilder("nested2.foo", "bar"), ScoreMode.Avg);
query = (ESToParentBlockJoinQuery) queryBuilder.toQuery(context);
@@ -316,11 +338,11 @@ public void testNested() throws IOException {
.build();
assertEquals(expectedChildQuery, query.getChildQuery());
- assertFalse(buildNestedHelper(mapperService).mightMatchNestedDocs(query));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(query, "nested1"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(query, "nested2"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(query, "nested3"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(query, "nested_missing"));
+ assertFalse(NestedHelper.mightMatchNestedDocs(query, searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(query, "nested1", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(query, "nested2", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(query, "nested3", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(query, "nested_missing", searchExecutionContext));
queryBuilder = new NestedQueryBuilder("nested3", new TermQueryBuilder("nested3.foo", "bar"), ScoreMode.Avg);
query = (ESToParentBlockJoinQuery) queryBuilder.toQuery(context);
@@ -331,10 +353,10 @@ public void testNested() throws IOException {
.build();
assertEquals(expectedChildQuery, query.getChildQuery());
- assertFalse(buildNestedHelper(mapperService).mightMatchNestedDocs(query));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(query, "nested1"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(query, "nested2"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(query, "nested3"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(query, "nested_missing"));
+ assertFalse(NestedHelper.mightMatchNestedDocs(query, searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(query, "nested1", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(query, "nested2", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(query, "nested3", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(query, "nested_missing", searchExecutionContext));
}
}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/DocumentPermissions.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/DocumentPermissions.java
index 14ecf4cb0d6e..24f0a5243620 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/DocumentPermissions.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/DocumentPermissions.java
@@ -160,10 +160,8 @@ private static void buildRoleQuery(
failIfQueryUsesClient(queryBuilder, context);
Query roleQuery = context.toQuery(queryBuilder).query();
filter.add(roleQuery, SHOULD);
- NestedLookup nestedLookup = context.nestedLookup();
- if (nestedLookup != NestedLookup.EMPTY) {
- NestedHelper nestedHelper = new NestedHelper(nestedLookup, context::isFieldMapped);
- if (nestedHelper.mightMatchNestedDocs(roleQuery)) {
+ if (context.nestedLookup() != NestedLookup.EMPTY) {
+ if (NestedHelper.mightMatchNestedDocs(roleQuery, context)) {
roleQuery = new BooleanQuery.Builder().add(roleQuery, FILTER)
.add(Queries.newNonNestedFilter(context.indexVersionCreated()), FILTER)
.build();
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java
index 39e2a3bc1d5a..17468f7afec1 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java
@@ -298,15 +298,11 @@ public SourceLoader newSourceLoader() {
@Override
public Query toQuery(QueryBuilder queryBuilder) {
Query query = ctx.toQuery(queryBuilder).query();
- NestedLookup nestedLookup = ctx.nestedLookup();
- if (nestedLookup != NestedLookup.EMPTY) {
- NestedHelper nestedHelper = new NestedHelper(nestedLookup, ctx::isFieldMapped);
- if (nestedHelper.mightMatchNestedDocs(query)) {
- // filter out nested documents
- query = new BooleanQuery.Builder().add(query, BooleanClause.Occur.MUST)
- .add(newNonNestedFilter(ctx.indexVersionCreated()), BooleanClause.Occur.FILTER)
- .build();
- }
+ if (ctx.nestedLookup() != NestedLookup.EMPTY && NestedHelper.mightMatchNestedDocs(query, ctx)) {
+ // filter out nested documents
+ query = new BooleanQuery.Builder().add(query, BooleanClause.Occur.MUST)
+ .add(newNonNestedFilter(ctx.indexVersionCreated()), BooleanClause.Occur.FILTER)
+ .build();
}
if (aliasFilter != AliasFilter.EMPTY) {
Query filterQuery = ctx.toQuery(aliasFilter.getQueryBuilder()).query();
From 949816f8585982e8b38f0a3433ffb1270e56e9ff Mon Sep 17 00:00:00 2001
From: Dimitris Rempapis
Date: Thu, 5 Dec 2024 18:35:41 +0200
Subject: [PATCH 06/26] SearchServiceTests.testParseSourceValidation failure
(#117963)
Remove test for deprecated code that was removed for v9.
---
docs/changelog/117963.yaml | 5 +
muted-tests.yml | 3 -
.../search/SearchServiceTests.java | 115 ------------------
3 files changed, 5 insertions(+), 118 deletions(-)
create mode 100644 docs/changelog/117963.yaml
diff --git a/docs/changelog/117963.yaml b/docs/changelog/117963.yaml
new file mode 100644
index 000000000000..4a50dc175786
--- /dev/null
+++ b/docs/changelog/117963.yaml
@@ -0,0 +1,5 @@
+pr: 117963
+summary: '`SearchServiceTests.testParseSourceValidation` failure'
+area: Search
+type: bug
+issues: []
diff --git a/muted-tests.yml b/muted-tests.yml
index b8d82f00bc43..735a34a3b45d 100644
--- a/muted-tests.yml
+++ b/muted-tests.yml
@@ -103,9 +103,6 @@ tests:
- class: org.elasticsearch.search.StressSearchServiceReaperIT
method: testStressReaper
issue: https://github.com/elastic/elasticsearch/issues/115816
-- class: org.elasticsearch.search.SearchServiceTests
- method: testParseSourceValidation
- issue: https://github.com/elastic/elasticsearch/issues/115936
- class: org.elasticsearch.xpack.application.connector.ConnectorIndexServiceTests
issue: https://github.com/elastic/elasticsearch/issues/116087
- class: org.elasticsearch.backwards.MixedClusterClientYamlTestSuiteIT
diff --git a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java
index d1ccfcbe7873..89fd25f638e1 100644
--- a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java
@@ -95,7 +95,6 @@
import org.elasticsearch.search.aggregations.support.ValueType;
import org.elasticsearch.search.builder.PointInTimeBuilder;
import org.elasticsearch.search.builder.SearchSourceBuilder;
-import org.elasticsearch.search.collapse.CollapseBuilder;
import org.elasticsearch.search.dfs.AggregatedDfs;
import org.elasticsearch.search.fetch.FetchSearchResult;
import org.elasticsearch.search.fetch.ShardFetchRequest;
@@ -124,7 +123,6 @@
import org.elasticsearch.search.rank.feature.RankFeatureResult;
import org.elasticsearch.search.rank.feature.RankFeatureShardRequest;
import org.elasticsearch.search.rank.feature.RankFeatureShardResult;
-import org.elasticsearch.search.slice.SliceBuilder;
import org.elasticsearch.search.suggest.SuggestBuilder;
import org.elasticsearch.tasks.TaskCancelHelper;
import org.elasticsearch.tasks.TaskCancelledException;
@@ -2930,119 +2928,6 @@ public void testSlicingBehaviourForParallelCollection() throws Exception {
}
}
- /**
- * This method tests validation that happens on the data nodes, which is now performed on the coordinating node.
- * We still need the validation to cover for mixed cluster scenarios where the coordinating node does not perform the check yet.
- */
- public void testParseSourceValidation() {
- String index = randomAlphaOfLengthBetween(5, 10).toLowerCase(Locale.ROOT);
- IndexService indexService = createIndex(index);
- final SearchService service = getInstanceFromNode(SearchService.class);
- {
- // scroll and search_after
- SearchRequest searchRequest = new SearchRequest().source(new SearchSourceBuilder());
- searchRequest.scroll(new TimeValue(1000));
- searchRequest.source().searchAfter(new String[] { "value" });
- assertCreateContextValidation(searchRequest, "`search_after` cannot be used in a scroll context.", indexService, service);
- }
- {
- // scroll and collapse
- SearchRequest searchRequest = new SearchRequest().source(new SearchSourceBuilder());
- searchRequest.scroll(new TimeValue(1000));
- searchRequest.source().collapse(new CollapseBuilder("field"));
- assertCreateContextValidation(searchRequest, "cannot use `collapse` in a scroll context", indexService, service);
- }
- {
- // search_after and `from` isn't valid
- SearchRequest searchRequest = new SearchRequest().source(new SearchSourceBuilder());
- searchRequest.source().searchAfter(new String[] { "value" });
- searchRequest.source().from(10);
- assertCreateContextValidation(
- searchRequest,
- "`from` parameter must be set to 0 when `search_after` is used",
- indexService,
- service
- );
- }
- {
- // slice without scroll or pit
- SearchRequest searchRequest = new SearchRequest().source(new SearchSourceBuilder());
- searchRequest.source().slice(new SliceBuilder(1, 10));
- assertCreateContextValidation(
- searchRequest,
- "[slice] can only be used with [scroll] or [point-in-time] requests",
- indexService,
- service
- );
- }
- {
- // stored fields disabled with _source requested
- SearchRequest searchRequest = new SearchRequest().source(new SearchSourceBuilder());
- searchRequest.source().storedField("_none_");
- searchRequest.source().fetchSource(true);
- assertCreateContextValidation(
- searchRequest,
- "[stored_fields] cannot be disabled if [_source] is requested",
- indexService,
- service
- );
- }
- {
- // stored fields disabled with fetch fields requested
- SearchRequest searchRequest = new SearchRequest().source(new SearchSourceBuilder());
- searchRequest.source().storedField("_none_");
- searchRequest.source().fetchSource(false);
- searchRequest.source().fetchField("field");
- assertCreateContextValidation(
- searchRequest,
- "[stored_fields] cannot be disabled when using the [fields] option",
- indexService,
- service
- );
- }
- }
-
- private static void assertCreateContextValidation(
- SearchRequest searchRequest,
- String errorMessage,
- IndexService indexService,
- SearchService searchService
- ) {
- ShardId shardId = new ShardId(indexService.index(), 0);
- long nowInMillis = System.currentTimeMillis();
- String clusterAlias = randomBoolean() ? null : randomAlphaOfLengthBetween(3, 10);
- searchRequest.allowPartialSearchResults(randomBoolean());
- ShardSearchRequest request = new ShardSearchRequest(
- OriginalIndices.NONE,
- searchRequest,
- shardId,
- 0,
- indexService.numberOfShards(),
- AliasFilter.EMPTY,
- 1f,
- nowInMillis,
- clusterAlias
- );
-
- SearchShardTask task = new SearchShardTask(1, "type", "action", "description", null, emptyMap());
-
- ReaderContext readerContext = null;
- try {
- ReaderContext createOrGetReaderContext = searchService.createOrGetReaderContext(request);
- readerContext = createOrGetReaderContext;
- IllegalArgumentException exception = expectThrows(
- IllegalArgumentException.class,
- () -> searchService.createContext(createOrGetReaderContext, request, task, ResultsType.QUERY, randomBoolean())
- );
- assertThat(exception.getMessage(), containsString(errorMessage));
- } finally {
- if (readerContext != null) {
- readerContext.close();
- searchService.freeReaderContext(readerContext.id());
- }
- }
- }
-
private static ReaderContext createReaderContext(IndexService indexService, IndexShard indexShard) {
return new ReaderContext(
new ShardSearchContextId(UUIDs.randomBase64UUID(), randomNonNegativeLong()),
From 176bf7a85abb2e2af06f60901b448bef945d528b Mon Sep 17 00:00:00 2001
From: Mark Tozzi
Date: Thu, 5 Dec 2024 11:53:15 -0500
Subject: [PATCH 07/26] ESQL Javadoc for creating new data types (#117520)
This adds some Javadoc to the DataType enum, listing the steps I followed when adding DateNanos. Hopefully it's helpful to future folks adding data types.
---------
Co-authored-by: Bogdan Pintea
---
.../xpack/esql/core/type/DataType.java | 107 ++++++++++++++++++
1 file changed, 107 insertions(+)
diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java
index a63571093ba5..d86cdb0de038 100644
--- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java
+++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java
@@ -32,6 +32,113 @@
import static org.elasticsearch.xpack.esql.core.util.PlanStreamInput.readCachedStringWithVersionCheck;
import static org.elasticsearch.xpack.esql.core.util.PlanStreamOutput.writeCachedStringWithVersionCheck;
+/**
+ * This enum represents data types the ES|QL query processing layer is able to
+ * interact with in some way. This includes fully representable types (e.g.
+ * {@link DataType#LONG}, numeric types which we promote (e.g. {@link DataType#SHORT})
+ * or fold into other types (e.g. {@link DataType#DATE_PERIOD}) early in the
+ * processing pipeline, types for internal use
+ * cases (e.g. {@link DataType#PARTIAL_AGG}), and types which the language
+ * doesn't support, but require special handling anyway (e.g.
+ * {@link DataType#OBJECT})
+ *
+ *
+ * Process for adding a new data type
+ * Note: it is not expected that all the following steps be done in a single PR.
+ * Use capabilities to gate tests as you go, and use as many PRs as you think
+ * appropriate. New data types are complex, and smaller PRs will make reviews
+ * easier.
+ *
+ *
+ * Create a new feature flag for the type in {@link EsqlCorePlugin}. We
+ * recommend developing the data type over a series of smaller PRs behind
+ * a feature flag; even for relatively simple data types.
+ *
+ * Add a capability to EsqlCapabilities related to the new type, and
+ * gated by the feature flag you just created. Again, using the feature
+ * flag is preferred over snapshot-only. As development progresses, you may
+ * need to add more capabilities related to the new type, e.g. for
+ * supporting specific functions. This is fine, and expected.
+ *
+ * Create a new CSV test file for the new type. You'll either need to
+ * create a new data file as well, or add values of the new type to
+ * an existing data file. See CsvTestDataLoader for creating a new data
+ * set.
+ *
+ * In the new CSV test file, start adding basic functionality tests.
+ * These should include reading and returning values, both from indexed data
+ * and from the ROW command. It should also include functions that support
+ * "every" type, such as Case or MvFirst.
+ *
+ * Add the new type to the CsvTestUtils#Type enum, if it isn't already
+ * there. You also need to modify CsvAssert to support reading values
+ * of the new type.
+ *
+ * At this point, the CSV tests should fail with a sensible ES|QL error
+ * message. Make sure they're failing in ES|QL, not in the test
+ * framework.
+ *
+ * Add the new data type to this enum. This will cause a bunch of
+ * compile errors for switch statements throughout the code. Resolve those
+ * as appropriate. That is the main way in which the new type will be tied
+ * into the framework.
+ *
+ * Add the new type to the {@link DataType#UNDER_CONSTRUCTION}
+ * collection. This is used by the test framework to disable some checks
+ * around how functions report their supported types, which would otherwise
+ * generate a lot of noise while the type is still in development.
+ *
+ * Add typed data generators to TestCaseSupplier, and make sure all
+ * functions that support the new type have tests for it.
+ *
+ * Work to support things all types should do. Equality and the
+ * "typeless" MV functions (MvFirst, MvLast, and MvCount) should work for
+ * most types. Case and Coalesce should also support all types.
+ * If the type has a natural ordering, make sure to test
+ * sorting and the other binary comparisons. Make sure these functions all
+ * have CSV tests that run against indexed data.
+ *
+ * Add conversion functions as appropriate. Almost all types should
+ * support ToString, and should have a "ToType" function that accepts a
+ * string. There may be other logical conversions depending on the nature
+ * of the type. Make sure to add the conversion function to the
+ * TYPE_TO_CONVERSION_FUNCTION map in EsqlDataTypeConverter. Make sure the
+ * conversion functions have CSV tests that run against indexed data.
+ *
+ * Support the new type in aggregations that are type independent.
+ * This includes Values, Count, and Count Distinct. Make sure there are
+ * CSV tests against indexed data for these.
+ *
+ * Support other functions and aggregations as appropriate, making sure
+ * to include CSV tests.
+ *
+ * Consider how the type will interact with other types. For example,
+ * if the new type is numeric, it may be good for it to be comparable with
+ * other numbers. Supporting this may require new logic in
+ * EsqlDataTypeConverter#commonType, individual function type checking, the
+ * verifier rules, or other places. We suggest starting with CSV tests and
+ * seeing where they fail.
+ *
+ * There are some additional steps that should be taken when removing the
+ * feature flag and getting ready for a release:
+ *
+ *
+ * Ensure the capabilities for this type are always enabled
+ *
+ *
+ * Remove the type from the {@link DataType#UNDER_CONSTRUCTION}
+ * collection
+ *
+ * Fix new test failures related to declared function types
+ *
+ *
+ * Make sure to run the full test suite locally via gradle to generate
+ * the function type tables and helper files with the new type. Ensure all
+ * the functions that support the type have appropriate docs for it.
+ *
+ * If appropriate, remove the type from the ESQL limitations list of
+ * unsupported types.
+ *
+ */
public enum DataType {
/**
* Fields of this type are unsupported by any functions and are always
From 162140e1d2e1c82faa5eada4d97a1143ba89afde Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Lorenzo=20Dematt=C3=A9?=
Date: Thu, 5 Dec 2024 17:58:56 +0100
Subject: [PATCH 08/26] Close URLClassLoaders so Windows can delete the
 temp test jar files (#118083)
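A minimal sketch of the pattern this patch applies (class and jar names are illustrative, not the test's actual helpers): URLClassLoader implements Closeable, so try-with-resources releases the jar's file handle, which Windows requires before the temp file can be deleted.

import java.net.URL;
import java.net.URLClassLoader;
import java.nio.file.Files;
import java.nio.file.Path;

class LoaderCleanupSketch {
    static void loadAndCleanUp(Path jar) throws Exception {
        // Closing the loader releases the open handle on the jar file,
        // which Windows requires before the temp file can be deleted.
        try (URLClassLoader loader = new URLClassLoader(new URL[] { jar.toUri().toURL() })) {
            Class<?> clazz = loader.loadClass("p.A"); // assumes the jar contains a class p.A
            System.out.println(clazz.getName());
        }
        Files.deleteIfExists(jar); // succeeds now that the loader is closed
    }
}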
---
.../bootstrap/PluginsResolverTests.java | 67 ++++++++++---------
1 file changed, 35 insertions(+), 32 deletions(-)
diff --git a/server/src/test/java/org/elasticsearch/bootstrap/PluginsResolverTests.java b/server/src/test/java/org/elasticsearch/bootstrap/PluginsResolverTests.java
index 331f0f7ad13e..798b576500d7 100644
--- a/server/src/test/java/org/elasticsearch/bootstrap/PluginsResolverTests.java
+++ b/server/src/test/java/org/elasticsearch/bootstrap/PluginsResolverTests.java
@@ -136,25 +136,28 @@ public void testResolveMultipleNonModularPlugins() throws IOException, ClassNotF
Path jar1 = createNonModularPluginJar(home, "plugin1", "p", "A");
Path jar2 = createNonModularPluginJar(home, "plugin2", "q", "B");
- var loader1 = createClassLoader(jar1);
- var loader2 = createClassLoader(jar2);
-
- PluginBundle bundle1 = createMockBundle("plugin1", null, "p.A");
- PluginBundle bundle2 = createMockBundle("plugin2", null, "q.B");
- PluginsLoader mockPluginsLoader = mock(PluginsLoader.class);
-
- when(mockPluginsLoader.pluginLayers()).thenReturn(
- Stream.of(new TestPluginLayer(bundle1, loader1, ModuleLayer.boot()), new TestPluginLayer(bundle2, loader2, ModuleLayer.boot()))
- );
- PluginsResolver pluginsResolver = PluginsResolver.create(mockPluginsLoader);
-
- var testClass1 = loader1.loadClass("p.A");
- var testClass2 = loader2.loadClass("q.B");
- var resolvedPluginName1 = pluginsResolver.resolveClassToPluginName(testClass1);
- var resolvedPluginName2 = pluginsResolver.resolveClassToPluginName(testClass2);
-
- assertEquals("plugin1", resolvedPluginName1);
- assertEquals("plugin2", resolvedPluginName2);
+ try (var loader1 = createClassLoader(jar1); var loader2 = createClassLoader(jar2)) {
+
+ PluginBundle bundle1 = createMockBundle("plugin1", null, "p.A");
+ PluginBundle bundle2 = createMockBundle("plugin2", null, "q.B");
+ PluginsLoader mockPluginsLoader = mock(PluginsLoader.class);
+
+ when(mockPluginsLoader.pluginLayers()).thenReturn(
+ Stream.of(
+ new TestPluginLayer(bundle1, loader1, ModuleLayer.boot()),
+ new TestPluginLayer(bundle2, loader2, ModuleLayer.boot())
+ )
+ );
+ PluginsResolver pluginsResolver = PluginsResolver.create(mockPluginsLoader);
+
+ var testClass1 = loader1.loadClass("p.A");
+ var testClass2 = loader2.loadClass("q.B");
+ var resolvedPluginName1 = pluginsResolver.resolveClassToPluginName(testClass1);
+ var resolvedPluginName2 = pluginsResolver.resolveClassToPluginName(testClass2);
+
+ assertEquals("plugin1", resolvedPluginName1);
+ assertEquals("plugin2", resolvedPluginName2);
+ }
}
public void testResolveNonModularPlugin() throws IOException, ClassNotFoundException {
@@ -164,22 +167,22 @@ public void testResolveNonModularPlugin() throws IOException, ClassNotFoundExcep
Path jar = createNonModularPluginJar(home, pluginName, "p", "A");
- var loader = createClassLoader(jar);
-
- PluginBundle bundle = createMockBundle(pluginName, null, "p.A");
- PluginsLoader mockPluginsLoader = mock(PluginsLoader.class);
+ try (var loader = createClassLoader(jar)) {
+ PluginBundle bundle = createMockBundle(pluginName, null, "p.A");
+ PluginsLoader mockPluginsLoader = mock(PluginsLoader.class);
- when(mockPluginsLoader.pluginLayers()).thenReturn(Stream.of(new TestPluginLayer(bundle, loader, ModuleLayer.boot())));
- PluginsResolver pluginsResolver = PluginsResolver.create(mockPluginsLoader);
+ when(mockPluginsLoader.pluginLayers()).thenReturn(Stream.of(new TestPluginLayer(bundle, loader, ModuleLayer.boot())));
+ PluginsResolver pluginsResolver = PluginsResolver.create(mockPluginsLoader);
- var testClass = loader.loadClass("p.A");
- var resolvedPluginName = pluginsResolver.resolveClassToPluginName(testClass);
- var unresolvedPluginName1 = pluginsResolver.resolveClassToPluginName(PluginsResolver.class);
- var unresolvedPluginName2 = pluginsResolver.resolveClassToPluginName(String.class);
+ var testClass = loader.loadClass("p.A");
+ var resolvedPluginName = pluginsResolver.resolveClassToPluginName(testClass);
+ var unresolvedPluginName1 = pluginsResolver.resolveClassToPluginName(PluginsResolver.class);
+ var unresolvedPluginName2 = pluginsResolver.resolveClassToPluginName(String.class);
- assertEquals(pluginName, resolvedPluginName);
- assertNull(unresolvedPluginName1);
- assertNull(unresolvedPluginName2);
+ assertEquals(pluginName, resolvedPluginName);
+ assertNull(unresolvedPluginName1);
+ assertNull(unresolvedPluginName2);
+ }
}
private static URLClassLoader createClassLoader(Path jar) throws MalformedURLException {
From eb4f33ae7be785404a97f6ca9c42f94749de3599 Mon Sep 17 00:00:00 2001
From: elasticsearchmachine
<58790826+elasticsearchmachine@users.noreply.github.com>
Date: Fri, 6 Dec 2024 04:09:50 +1100
Subject: [PATCH 09/26] Mute
org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT
org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT #117981
---
muted-tests.yml | 2 ++
1 file changed, 2 insertions(+)
diff --git a/muted-tests.yml b/muted-tests.yml
index 735a34a3b45d..a09e46415fdc 100644
--- a/muted-tests.yml
+++ b/muted-tests.yml
@@ -251,6 +251,8 @@ tests:
- class: org.elasticsearch.packaging.test.ArchiveTests
method: test40AutoconfigurationNotTriggeredWhenNodeIsMeantToJoinExistingCluster
issue: https://github.com/elastic/elasticsearch/issues/118029
+- class: org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT
+ issue: https://github.com/elastic/elasticsearch/issues/117981
# Examples:
#
From 21f72f8f6a9e2518a21631e61713880aa027c2c4 Mon Sep 17 00:00:00 2001
From: Benjamin Trent
Date: Thu, 5 Dec 2024 12:15:48 -0500
Subject: [PATCH 10/26] Removing dead/unused deprecation logger code (#118082)
---
.../rest/action/document/RestGetSourceAction.java | 4 ----
.../rest/action/document/RestMultiTermVectorsAction.java | 3 ---
.../org/elasticsearch/rest/action/search/RestCountAction.java | 3 ---
.../elasticsearch/rest/action/search/RestSearchAction.java | 3 ---
.../org/elasticsearch/search/builder/SearchSourceBuilder.java | 2 --
.../org/elasticsearch/search/sort/GeoDistanceSortBuilder.java | 2 --
6 files changed, 17 deletions(-)
diff --git a/server/src/main/java/org/elasticsearch/rest/action/document/RestGetSourceAction.java b/server/src/main/java/org/elasticsearch/rest/action/document/RestGetSourceAction.java
index a09fcbd0c527..7e4d23db7028 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/document/RestGetSourceAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/document/RestGetSourceAction.java
@@ -15,7 +15,6 @@
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.client.internal.node.NodeClient;
import org.elasticsearch.common.bytes.BytesReference;
-import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.RestChannel;
@@ -40,9 +39,6 @@
*/
@ServerlessScope(Scope.PUBLIC)
public class RestGetSourceAction extends BaseRestHandler {
- private final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestGetSourceAction.class);
- static final String TYPES_DEPRECATION_MESSAGE = "[types removal] Specifying types in get_source and exist_source "
- + "requests is deprecated.";
@Override
public List routes() {
diff --git a/server/src/main/java/org/elasticsearch/rest/action/document/RestMultiTermVectorsAction.java b/server/src/main/java/org/elasticsearch/rest/action/document/RestMultiTermVectorsAction.java
index 65aa1869a41e..9d39bf7f343c 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/document/RestMultiTermVectorsAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/document/RestMultiTermVectorsAction.java
@@ -13,7 +13,6 @@
import org.elasticsearch.action.termvectors.TermVectorsRequest;
import org.elasticsearch.client.internal.node.NodeClient;
import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.Scope;
@@ -28,8 +27,6 @@
@ServerlessScope(Scope.PUBLIC)
public class RestMultiTermVectorsAction extends BaseRestHandler {
- private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestMultiTermVectorsAction.class);
- static final String TYPES_DEPRECATION_MESSAGE = "[types removal] Specifying types in multi term vector requests is deprecated.";
@Override
public List routes() {
diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/RestCountAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestCountAction.java
index c1a55874bfc5..b0e08b376f9d 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/search/RestCountAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestCountAction.java
@@ -14,7 +14,6 @@
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.internal.node.NodeClient;
import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.RestRequest;
@@ -36,8 +35,6 @@
@ServerlessScope(Scope.PUBLIC)
public class RestCountAction extends BaseRestHandler {
- private final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestCountAction.class);
- static final String TYPES_DEPRECATION_MESSAGE = "[types removal] Specifying types in count requests is deprecated.";
@Override
public List routes() {
diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java
index ff062084a3cb..a9c2ff7576b0 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java
@@ -16,7 +16,6 @@
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.internal.node.NodeClient;
import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.core.Booleans;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.features.NodeFeature;
@@ -56,8 +55,6 @@
@ServerlessScope(Scope.PUBLIC)
public class RestSearchAction extends BaseRestHandler {
- private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestSearchAction.class);
- public static final String TYPES_DEPRECATION_MESSAGE = "[types removal] Specifying types in search requests is deprecated.";
/**
* Indicates whether hits.total should be rendered as an integer or an object
diff --git a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java
index 098a2b2f45d2..3554a6dc08b9 100644
--- a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java
@@ -19,7 +19,6 @@
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
-import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.util.CollectionUtils;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.core.Booleans;
@@ -92,7 +91,6 @@
* @see SearchRequest#source(SearchSourceBuilder)
*/
public final class SearchSourceBuilder implements Writeable, ToXContentObject, Rewriteable {
- private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(SearchSourceBuilder.class);
public static final ParseField FROM_FIELD = new ParseField("from");
public static final ParseField SIZE_FIELD = new ParseField("size");
diff --git a/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java b/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java
index 6640f0f85840..2aaade35fb8f 100644
--- a/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java
@@ -28,7 +28,6 @@
import org.elasticsearch.common.geo.GeoUtils;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.unit.DistanceUnit;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.index.fielddata.FieldData;
@@ -67,7 +66,6 @@
* A geo distance based sorting on a geo point like field.
*/
public class GeoDistanceSortBuilder extends SortBuilder {
- private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(GeoDistanceSortBuilder.class);
public static final String NAME = "_geo_distance";
public static final String ALTERNATIVE_NAME = "_geoDistance";
From 4740b02a9b1c7c7ae0c6383c5985720bbdfa353c Mon Sep 17 00:00:00 2001
From: Henrique Paes
Date: Thu, 5 Dec 2024 12:22:48 -0500
Subject: [PATCH 11/26] Wrap jackson exception on malformed json string
(#114445)
This commit wraps the underlying Jackson parse exception in an XContentParseException when it is encountered while parsing string tokens.
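A minimal sketch of what callers observe after this change, mirroring the test added below (the malformed input is illustrative): a malformed string token now surfaces as an XContentParseException rather than a raw Jackson JsonParseException.

import org.elasticsearch.xcontent.XContentParseException;
import org.elasticsearch.xcontent.XContentParser;
import org.elasticsearch.xcontent.XContentParserConfiguration;
import org.elasticsearch.xcontent.json.JsonXContent;

class MalformedStringSketch {
    static void demo() throws Exception {
        String json = "{\"test\":\"/*/}"; // the string value is never terminated
        try (XContentParser parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, json)) {
            parser.nextToken(); // START_OBJECT
            parser.nextToken(); // FIELD_NAME
            parser.nextToken(); // VALUE_STRING (malformed)
            try {
                parser.text();
            } catch (XContentParseException e) {
                // Before this change the raw Jackson JsonParseException escaped here.
                System.out.println("wrapped: " + e.getMessage());
            }
        }
    }
}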
---
docs/changelog/114445.yaml | 6 ++++++
.../xcontent/provider/json/JsonXContentParser.java | 6 +++++-
.../java/org/elasticsearch/http/BulkRestIT.java | 3 +--
.../common/xcontent/json/JsonXContentTests.java | 13 +++++++++++++
4 files changed, 25 insertions(+), 3 deletions(-)
create mode 100644 docs/changelog/114445.yaml
diff --git a/docs/changelog/114445.yaml b/docs/changelog/114445.yaml
new file mode 100644
index 000000000000..afbc080d1e0b
--- /dev/null
+++ b/docs/changelog/114445.yaml
@@ -0,0 +1,6 @@
+pr: 114445
+summary: Wrap jackson exception on malformed json string
+area: Infra/Core
+type: bug
+issues:
+ - 114142
diff --git a/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentParser.java b/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentParser.java
index d42c56845d03..38ef8bc2e4ef 100644
--- a/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentParser.java
+++ b/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentParser.java
@@ -108,7 +108,11 @@ public String text() throws IOException {
if (currentToken().isValue() == false) {
throwOnNoText();
}
- return parser.getText();
+ try {
+ return parser.getText();
+ } catch (JsonParseException e) {
+ throw newXContentParseException(e);
+ }
}
private void throwOnNoText() {
diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/BulkRestIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/BulkRestIT.java
index 369d0824bdb2..3faa88339f0a 100644
--- a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/BulkRestIT.java
+++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/BulkRestIT.java
@@ -74,8 +74,7 @@ public void testBulkInvalidIndexNameString() throws IOException {
ResponseException responseException = expectThrows(ResponseException.class, () -> getRestClient().performRequest(request));
assertThat(responseException.getResponse().getStatusLine().getStatusCode(), equalTo(BAD_REQUEST.getStatus()));
- assertThat(responseException.getMessage(), containsString("could not parse bulk request body"));
- assertThat(responseException.getMessage(), containsString("json_parse_exception"));
+ assertThat(responseException.getMessage(), containsString("x_content_parse_exception"));
assertThat(responseException.getMessage(), containsString("Invalid UTF-8"));
}
diff --git a/server/src/test/java/org/elasticsearch/common/xcontent/json/JsonXContentTests.java b/server/src/test/java/org/elasticsearch/common/xcontent/json/JsonXContentTests.java
index 55f6cc5498d8..4135ead545e0 100644
--- a/server/src/test/java/org/elasticsearch/common/xcontent/json/JsonXContentTests.java
+++ b/server/src/test/java/org/elasticsearch/common/xcontent/json/JsonXContentTests.java
@@ -11,6 +11,9 @@
import org.elasticsearch.common.xcontent.BaseXContentTestCase;
import org.elasticsearch.xcontent.XContentGenerator;
+import org.elasticsearch.xcontent.XContentParseException;
+import org.elasticsearch.xcontent.XContentParser;
+import org.elasticsearch.xcontent.XContentParserConfiguration;
import org.elasticsearch.xcontent.XContentType;
import org.elasticsearch.xcontent.json.JsonXContent;
@@ -28,4 +31,14 @@ public void testBigInteger() throws Exception {
XContentGenerator generator = JsonXContent.jsonXContent.createGenerator(os);
doTestBigInteger(generator, os);
}
+
+ public void testMalformedJsonFieldThrowsXContentException() throws Exception {
+ String json = "{\"test\":\"/*/}";
+ try (XContentParser parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, json)) {
+ parser.nextToken();
+ parser.nextToken();
+ parser.nextToken();
+ assertThrows(XContentParseException.class, () -> parser.text());
+ }
+ }
}
From 7070e95fa78ef29df363d1d49cd05f0b79a835bf Mon Sep 17 00:00:00 2001
From: Mark Vieira
Date: Thu, 5 Dec 2024 09:43:18 -0800
Subject: [PATCH 12/26] Update BWC version logic to support multiple bugfix
versions (#117943)
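A hedged sketch of how the reworked BwcVersions constructor is meant to be driven (the branch list and version lines are illustrative): the active development branches are now passed in alongside the version lines, and the build plugin is assumed to read them from a branches.json file shaped like {"branches": [{"branch": "main"}, {"branch": "8.x"}, ...]}.

import java.util.List;

import org.elasticsearch.gradle.internal.BwcVersions;

class BwcBranchesSketch {
    static void demo(List<String> versionLines) {
        // Development branches as they would be parsed from branches.json; with two
        // unreleased patch releases, two separate bugfix checkouts are assigned
        // (:distribution:bwc:bugfix and :distribution:bwc:bugfix2).
        List<String> developmentBranches = List.of("main", "8.x", "8.17", "8.16", "8.15", "7.17");
        BwcVersions bwc = new BwcVersions(versionLines, developmentBranches);
        bwc.forPreviousUnreleased(info -> System.out.println(info));
    }
}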
---
.buildkite/pipelines/intake.yml | 2 +-
.buildkite/pipelines/periodic.yml | 4 +-
.ci/snapshotBwcVersions | 1 +
...lDistributionBwcSetupPluginFuncTest.groovy | 24 ++-
...lDistributionDownloadPluginFuncTest.groovy | 4 +-
...acyYamlRestCompatTestPluginFuncTest.groovy | 16 +-
.../distribution/bwc/bugfix2/build.gradle | 0
.../distribution/bwc/maintenance/build.gradle | 0
.../internal/fake_git/remote/settings.gradle | 2 +
.../gradle/internal/BwcVersions.java | 126 ++++++-----
.../internal/info/GlobalBuildInfoPlugin.java | 25 ++-
.../gradle/internal/BwcVersionsSpec.groovy | 196 +++++++++++-------
...stractDistributionDownloadPluginTests.java | 14 +-
.../fixtures/AbstractGradleFuncTest.groovy | 18 +-
distribution/bwc/bugfix2/build.gradle | 0
settings.gradle | 1 +
16 files changed, 273 insertions(+), 160 deletions(-)
create mode 100644 build-tools-internal/src/integTest/resources/org/elasticsearch/gradle/internal/fake_git/remote/distribution/bwc/bugfix2/build.gradle
create mode 100644 build-tools-internal/src/integTest/resources/org/elasticsearch/gradle/internal/fake_git/remote/distribution/bwc/maintenance/build.gradle
create mode 100644 distribution/bwc/bugfix2/build.gradle
diff --git a/.buildkite/pipelines/intake.yml b/.buildkite/pipelines/intake.yml
index 6c8b8edfcbac..4bc72aec2097 100644
--- a/.buildkite/pipelines/intake.yml
+++ b/.buildkite/pipelines/intake.yml
@@ -56,7 +56,7 @@ steps:
timeout_in_minutes: 300
matrix:
setup:
- BWC_VERSION: ["8.16.2", "8.17.0", "8.18.0", "9.0.0"]
+ BWC_VERSION: ["8.15.6", "8.16.2", "8.17.0", "8.18.0", "9.0.0"]
agents:
provider: gcp
image: family/elasticsearch-ubuntu-2004
diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml
index 69d11ef1dabb..3d6095d0b9e6 100644
--- a/.buildkite/pipelines/periodic.yml
+++ b/.buildkite/pipelines/periodic.yml
@@ -448,7 +448,7 @@ steps:
setup:
ES_RUNTIME_JAVA:
- openjdk21
- BWC_VERSION: ["8.16.2", "8.17.0", "8.18.0", "9.0.0"]
+ BWC_VERSION: ["8.15.6", "8.16.2", "8.17.0", "8.18.0", "9.0.0"]
agents:
provider: gcp
image: family/elasticsearch-ubuntu-2004
@@ -490,7 +490,7 @@ steps:
ES_RUNTIME_JAVA:
- openjdk21
- openjdk23
- BWC_VERSION: ["8.16.2", "8.17.0", "8.18.0", "9.0.0"]
+ BWC_VERSION: ["8.15.6", "8.16.2", "8.17.0", "8.18.0", "9.0.0"]
agents:
provider: gcp
image: family/elasticsearch-ubuntu-2004
diff --git a/.ci/snapshotBwcVersions b/.ci/snapshotBwcVersions
index 5514fc376a28..f92881da7fea 100644
--- a/.ci/snapshotBwcVersions
+++ b/.ci/snapshotBwcVersions
@@ -1,4 +1,5 @@
BWC_VERSION:
+ - "8.15.6"
- "8.16.2"
- "8.17.0"
- "8.18.0"
diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPluginFuncTest.groovy
index 6d080e1c8076..bb100b6b2388 100644
--- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPluginFuncTest.groovy
+++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPluginFuncTest.groovy
@@ -9,9 +9,10 @@
package org.elasticsearch.gradle.internal
+import spock.lang.Unroll
+
import org.elasticsearch.gradle.fixtures.AbstractGitAwareGradleFuncTest
import org.gradle.testkit.runner.TaskOutcome
-import spock.lang.Unroll
class InternalDistributionBwcSetupPluginFuncTest extends AbstractGitAwareGradleFuncTest {
@@ -23,8 +24,10 @@ class InternalDistributionBwcSetupPluginFuncTest extends AbstractGitAwareGradleF
apply plugin: 'elasticsearch.internal-distribution-bwc-setup'
"""
execute("git branch origin/8.x", file("cloned"))
+ execute("git branch origin/8.3", file("cloned"))
+ execute("git branch origin/8.2", file("cloned"))
+ execute("git branch origin/8.1", file("cloned"))
execute("git branch origin/7.16", file("cloned"))
- execute("git branch origin/7.15", file("cloned"))
}
def "builds distribution from branches via archives extractedAssemble"() {
@@ -48,10 +51,11 @@ class InternalDistributionBwcSetupPluginFuncTest extends AbstractGitAwareGradleF
assertOutputContains(result.output, "[$bwcDistVersion] > Task :distribution:archives:darwin-tar:${expectedAssembleTaskName}")
where:
- bwcDistVersion | bwcProject | expectedAssembleTaskName
- "8.0.0" | "minor" | "extractedAssemble"
- "7.16.0" | "staged" | "extractedAssemble"
- "7.15.2" | "bugfix" | "extractedAssemble"
+ bwcDistVersion | bwcProject | expectedAssembleTaskName
+ "8.4.0" | "minor" | "extractedAssemble"
+ "8.3.0" | "staged" | "extractedAssemble"
+ "8.2.1" | "bugfix" | "extractedAssemble"
+ "8.1.3" | "bugfix2" | "extractedAssemble"
}
@Unroll
@@ -70,8 +74,8 @@ class InternalDistributionBwcSetupPluginFuncTest extends AbstractGitAwareGradleF
where:
bwcDistVersion | platform
- "8.0.0" | "darwin"
- "8.0.0" | "linux"
+ "8.4.0" | "darwin"
+ "8.4.0" | "linux"
}
def "bwc expanded distribution folder can be resolved as bwc project artifact"() {
@@ -107,11 +111,11 @@ class InternalDistributionBwcSetupPluginFuncTest extends AbstractGitAwareGradleF
result.task(":resolveExpandedDistribution").outcome == TaskOutcome.SUCCESS
result.task(":distribution:bwc:minor:buildBwcDarwinTar").outcome == TaskOutcome.SUCCESS
and: "assemble task triggered"
- result.output.contains("[8.0.0] > Task :distribution:archives:darwin-tar:extractedAssemble")
+ result.output.contains("[8.4.0] > Task :distribution:archives:darwin-tar:extractedAssemble")
result.output.contains("expandedRootPath /distribution/bwc/minor/build/bwc/checkout-8.x/" +
"distribution/archives/darwin-tar/build/install")
result.output.contains("nested folder /distribution/bwc/minor/build/bwc/checkout-8.x/" +
- "distribution/archives/darwin-tar/build/install/elasticsearch-8.0.0-SNAPSHOT")
+ "distribution/archives/darwin-tar/build/install/elasticsearch-8.4.0-SNAPSHOT")
}
}
diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionDownloadPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionDownloadPluginFuncTest.groovy
index eb6185e5aed5..fc5d432a9ef9 100644
--- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionDownloadPluginFuncTest.groovy
+++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionDownloadPluginFuncTest.groovy
@@ -57,7 +57,7 @@ class InternalDistributionDownloadPluginFuncTest extends AbstractGradleFuncTest
elasticsearch_distributions {
test_distro {
- version = "8.0.0"
+ version = "8.4.0"
type = "archive"
platform = "linux"
architecture = Architecture.current();
@@ -87,7 +87,7 @@ class InternalDistributionDownloadPluginFuncTest extends AbstractGradleFuncTest
elasticsearch_distributions {
test_distro {
- version = "8.0.0"
+ version = "8.4.0"
type = "archive"
platform = "linux"
architecture = Architecture.current();
diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestCompatTestPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestCompatTestPluginFuncTest.groovy
index e3efe3d7ffbf..15b057a05e03 100644
--- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestCompatTestPluginFuncTest.groovy
+++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestCompatTestPluginFuncTest.groovy
@@ -40,7 +40,7 @@ class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTe
given:
internalBuild()
- subProject(":distribution:bwc:staged") << """
+ subProject(":distribution:bwc:minor") << """
configurations { checkout }
artifacts {
checkout(new File(projectDir, "checkoutDir"))
@@ -61,11 +61,11 @@ class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTe
result.task(transformTask).outcome == TaskOutcome.NO_SOURCE
}
- def "yamlRestCompatTest executes and copies api and transforms tests from :bwc:staged"() {
+ def "yamlRestCompatTest executes and copies api and transforms tests from :bwc:minor"() {
given:
internalBuild()
- subProject(":distribution:bwc:staged") << """
+ subProject(":distribution:bwc:minor") << """
configurations { checkout }
artifacts {
checkout(new File(projectDir, "checkoutDir"))
@@ -98,8 +98,8 @@ class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTe
String api = "foo.json"
String test = "10_basic.yml"
//add the compatible test and api files, these are the prior version's normal yaml rest tests
- file("distribution/bwc/staged/checkoutDir/rest-api-spec/src/main/resources/rest-api-spec/api/" + api) << ""
- file("distribution/bwc/staged/checkoutDir/src/yamlRestTest/resources/rest-api-spec/test/" + test) << ""
+ file("distribution/bwc/minor/checkoutDir/rest-api-spec/src/main/resources/rest-api-spec/api/" + api) << ""
+ file("distribution/bwc/minor/checkoutDir/src/yamlRestTest/resources/rest-api-spec/test/" + test) << ""
when:
def result = gradleRunner("yamlRestCompatTest").build()
@@ -145,7 +145,7 @@ class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTe
given:
internalBuild()
withVersionCatalogue()
- subProject(":distribution:bwc:staged") << """
+ subProject(":distribution:bwc:minor") << """
configurations { checkout }
artifacts {
checkout(new File(projectDir, "checkoutDir"))
@@ -186,7 +186,7 @@ class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTe
given:
internalBuild()
- subProject(":distribution:bwc:staged") << """
+ subProject(":distribution:bwc:minor") << """
configurations { checkout }
artifacts {
checkout(new File(projectDir, "checkoutDir"))
@@ -230,7 +230,7 @@ class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTe
setupRestResources([], [])
- file("distribution/bwc/staged/checkoutDir/src/yamlRestTest/resources/rest-api-spec/test/test.yml" ) << """
+ file("distribution/bwc/minor/checkoutDir/src/yamlRestTest/resources/rest-api-spec/test/test.yml" ) << """
"one":
- do:
do_.some.key_to_replace:
diff --git a/build-tools-internal/src/integTest/resources/org/elasticsearch/gradle/internal/fake_git/remote/distribution/bwc/bugfix2/build.gradle b/build-tools-internal/src/integTest/resources/org/elasticsearch/gradle/internal/fake_git/remote/distribution/bwc/bugfix2/build.gradle
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/build-tools-internal/src/integTest/resources/org/elasticsearch/gradle/internal/fake_git/remote/distribution/bwc/maintenance/build.gradle b/build-tools-internal/src/integTest/resources/org/elasticsearch/gradle/internal/fake_git/remote/distribution/bwc/maintenance/build.gradle
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/build-tools-internal/src/integTest/resources/org/elasticsearch/gradle/internal/fake_git/remote/settings.gradle b/build-tools-internal/src/integTest/resources/org/elasticsearch/gradle/internal/fake_git/remote/settings.gradle
index 8c321294b585..e931537fcd6e 100644
--- a/build-tools-internal/src/integTest/resources/org/elasticsearch/gradle/internal/fake_git/remote/settings.gradle
+++ b/build-tools-internal/src/integTest/resources/org/elasticsearch/gradle/internal/fake_git/remote/settings.gradle
@@ -10,9 +10,11 @@
rootProject.name = "root"
include ":distribution:bwc:bugfix"
+include ":distribution:bwc:bugfix2"
include ":distribution:bwc:minor"
include ":distribution:bwc:major"
include ":distribution:bwc:staged"
+include ":distribution:bwc:maintenance"
include ":distribution:archives:darwin-tar"
include ":distribution:archives:oss-darwin-tar"
include ":distribution:archives:darwin-aarch64-tar"
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcVersions.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcVersions.java
index 93c2623a23d3..37b28389ad97 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcVersions.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcVersions.java
@@ -21,14 +21,15 @@
import java.util.Optional;
import java.util.Set;
import java.util.TreeMap;
-import java.util.TreeSet;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.function.Predicate;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
+import static java.util.Collections.reverseOrder;
import static java.util.Collections.unmodifiableList;
+import static java.util.Comparator.comparing;
/**
* A container for elasticsearch supported version information used in BWC testing.
@@ -73,11 +74,11 @@ public class BwcVersions implements Serializable {
private final transient List versions;
private final Map unreleased;
- public BwcVersions(List versionLines) {
- this(versionLines, Version.fromString(VersionProperties.getElasticsearch()));
+ public BwcVersions(List versionLines, List developmentBranches) {
+ this(versionLines, Version.fromString(VersionProperties.getElasticsearch()), developmentBranches);
}
- public BwcVersions(Version currentVersionProperty, List allVersions) {
+ public BwcVersions(Version currentVersionProperty, List allVersions, List developmentBranches) {
if (allVersions.isEmpty()) {
throw new IllegalArgumentException("Could not parse any versions");
}
@@ -86,12 +87,12 @@ public BwcVersions(Version currentVersionProperty, List allVersions) {
this.currentVersion = allVersions.get(allVersions.size() - 1);
assertCurrentVersionMatchesParsed(currentVersionProperty);
- this.unreleased = computeUnreleased();
+ this.unreleased = computeUnreleased(developmentBranches);
}
// Visible for testing
- BwcVersions(List versionLines, Version currentVersionProperty) {
- this(currentVersionProperty, parseVersionLines(versionLines));
+ BwcVersions(List versionLines, Version currentVersionProperty, List developmentBranches) {
+ this(currentVersionProperty, parseVersionLines(versionLines), developmentBranches);
}
private static List parseVersionLines(List versionLines) {
@@ -126,58 +127,77 @@ public void forPreviousUnreleased(Consumer consumer) {
getUnreleased().stream().filter(version -> version.equals(currentVersion) == false).map(unreleased::get).forEach(consumer);
}
- private String getBranchFor(Version version) {
- if (version.equals(currentVersion)) {
- // Just assume the current branch is 'main'. It's actually not important, we never check out the current branch.
- return "main";
- } else {
+ private String getBranchFor(Version version, List developmentBranches) {
+ // If the current version matches a specific feature freeze branch, use that
+ if (developmentBranches.contains(version.getMajor() + "." + version.getMinor())) {
return version.getMajor() + "." + version.getMinor();
+ } else if (developmentBranches.contains(version.getMajor() + ".x")) { // Otherwise if an n.x branch exists and we are that major
+ return version.getMajor() + ".x";
+ } else { // otherwise we're the main branch
+ return "main";
}
}
- private Map computeUnreleased() {
- Set unreleased = new TreeSet<>();
- // The current version is being worked, is always unreleased
- unreleased.add(currentVersion);
- // Recurse for all unreleased versions starting from the current version
- addUnreleased(unreleased, currentVersion, 0);
+ private Map computeUnreleased(List developmentBranches) {
+ Map result = new TreeMap<>();
- // Grab the latest version from the previous major if necessary as well, this is going to be a maintenance release
- Version maintenance = versions.stream()
- .filter(v -> v.getMajor() == currentVersion.getMajor() - 1)
- .max(Comparator.naturalOrder())
- .orElseThrow();
- // This is considered the maintenance release only if we haven't yet encountered it
- boolean hasMaintenanceRelease = unreleased.add(maintenance);
+ // The current version is always in development
+ String currentBranch = getBranchFor(currentVersion, developmentBranches);
+ result.put(currentVersion, new UnreleasedVersionInfo(currentVersion, currentBranch, ":distribution"));
+
+ // Check for an n.x branch as well
+ if (currentBranch.equals("main") && developmentBranches.stream().anyMatch(s -> s.endsWith(".x"))) {
+ // This should correspond to the latest new minor
+ Version version = versions.stream()
+ .sorted(Comparator.reverseOrder())
+ .filter(v -> v.getMajor() == (currentVersion.getMajor() - 1) && v.getRevision() == 0)
+ .findFirst()
+ .orElseThrow(() -> new IllegalStateException("Unable to determine development version for branch"));
+ String branch = getBranchFor(version, developmentBranches);
+ assert branch.equals(currentVersion.getMajor() - 1 + ".x") : "Expected branch does not match development branch";
+
+ result.put(version, new UnreleasedVersionInfo(version, branch, ":distribution:bwc:minor"));
+ }
- List unreleasedList = unreleased.stream().sorted(Comparator.reverseOrder()).toList();
- Map result = new TreeMap<>();
- boolean newMinor = false;
- for (int i = 0; i < unreleasedList.size(); i++) {
- Version esVersion = unreleasedList.get(i);
- // This is either a new minor or staged release
- if (currentVersion.equals(esVersion)) {
- result.put(esVersion, new UnreleasedVersionInfo(esVersion, getBranchFor(esVersion), ":distribution"));
- } else if (esVersion.getRevision() == 0) {
- // If there are two upcoming unreleased minors then this one is the new minor
- if (newMinor == false && unreleasedList.get(i + 1).getRevision() == 0) {
- result.put(esVersion, new UnreleasedVersionInfo(esVersion, esVersion.getMajor() + ".x", ":distribution:bwc:minor"));
- newMinor = true;
- } else if (newMinor == false
- && unreleasedList.stream().filter(v -> v.getMajor() == esVersion.getMajor() && v.getRevision() == 0).count() == 1) {
- // This is the only unreleased new minor which means we've not yet staged it for release
- result.put(esVersion, new UnreleasedVersionInfo(esVersion, esVersion.getMajor() + ".x", ":distribution:bwc:minor"));
- newMinor = true;
- } else {
- result.put(esVersion, new UnreleasedVersionInfo(esVersion, getBranchFor(esVersion), ":distribution:bwc:staged"));
- }
- } else {
- // If this is the oldest unreleased version and we have a maintenance release
- if (i == unreleasedList.size() - 1 && hasMaintenanceRelease) {
- result.put(esVersion, new UnreleasedVersionInfo(esVersion, getBranchFor(esVersion), ":distribution:bwc:maintenance"));
- } else {
- result.put(esVersion, new UnreleasedVersionInfo(esVersion, getBranchFor(esVersion), ":distribution:bwc:bugfix"));
- }
+ // Now handle all the feature freeze branches
+ List featureFreezeBranches = developmentBranches.stream()
+ .filter(b -> Pattern.matches("[0-9]+\\.[0-9]+", b))
+ .sorted(reverseOrder(comparing(s -> Version.fromString(s, Version.Mode.RELAXED))))
+ .toList();
+
+ boolean existingBugfix = false;
+ for (int i = 0; i < featureFreezeBranches.size(); i++) {
+ String branch = featureFreezeBranches.get(i);
+ Version version = versions.stream()
+ .sorted(Comparator.reverseOrder())
+ .filter(v -> v.toString().startsWith(branch))
+ .findFirst()
+ .orElse(null);
+
+ // If we don't know about this version we can ignore it
+ if (version == null) {
+ continue;
+ }
+
+ // If this is the current version we can ignore as we've already handled it
+ if (version.equals(currentVersion)) {
+ continue;
+ }
+
+ // We only maintain compatibility back one major so ignore anything older
+ if (currentVersion.getMajor() - version.getMajor() > 1) {
+ continue;
+ }
+
+ // This is the maintenance version
+ if (i == featureFreezeBranches.size() - 1) {
+ result.put(version, new UnreleasedVersionInfo(version, branch, ":distribution:bwc:maintenance"));
+ } else if (version.getRevision() == 0) { // This is the next staged minor
+ result.put(version, new UnreleasedVersionInfo(version, branch, ":distribution:bwc:staged"));
+ } else { // This is a bugfix
+ String project = existingBugfix ? "bugfix2" : "bugfix";
+ result.put(version, new UnreleasedVersionInfo(version, branch, ":distribution:bwc:" + project));
+ existingBugfix = true;
}
}
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java
index 0535026b2594..27d2a66feb20 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java
@@ -8,6 +8,9 @@
*/
package org.elasticsearch.gradle.internal.info;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+
import org.apache.commons.io.IOUtils;
import org.elasticsearch.gradle.VersionProperties;
import org.elasticsearch.gradle.internal.BwcVersions;
@@ -44,11 +47,13 @@
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
+import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.UncheckedIOException;
import java.nio.file.Files;
import java.time.ZoneOffset;
import java.time.ZonedDateTime;
+import java.util.ArrayList;
import java.util.List;
import java.util.Locale;
import java.util.Random;
@@ -68,6 +73,7 @@ public class GlobalBuildInfoPlugin implements Plugin {
private final JavaInstallationRegistry javaInstallationRegistry;
private final JvmMetadataDetector metadataDetector;
private final ProviderFactory providers;
+ private final ObjectMapper objectMapper;
private JavaToolchainService toolChainService;
private Project project;
@@ -82,7 +88,7 @@ public GlobalBuildInfoPlugin(
this.javaInstallationRegistry = javaInstallationRegistry;
this.metadataDetector = new ErrorTraceMetadataDetector(metadataDetector);
this.providers = providers;
-
+ this.objectMapper = new ObjectMapper();
}
@Override
@@ -190,12 +196,27 @@ private BwcVersions resolveBwcVersions() {
);
try (var is = new FileInputStream(versionsFilePath)) {
List versionLines = IOUtils.readLines(is, "UTF-8");
- return new BwcVersions(versionLines);
+ return new BwcVersions(versionLines, getDevelopmentBranches());
} catch (IOException e) {
throw new IllegalStateException("Unable to resolve to resolve bwc versions from versionsFile.", e);
}
}
+ private List getDevelopmentBranches() {
+ List branches = new ArrayList<>();
+ File branchesFile = new File(Util.locateElasticsearchWorkspace(project.getGradle()), "branches.json");
+ try (InputStream is = new FileInputStream(branchesFile)) {
+ JsonNode json = objectMapper.readTree(is);
+ for (JsonNode node : json.get("branches")) {
+ branches.add(node.get("branch").asText());
+ }
+ } catch (IOException e) {
+ throw new UncheckedIOException(e);
+ }
+
+ return branches;
+ }
+
private void logGlobalBuildInfo(BuildParameterExtension buildParams) {
final String osName = System.getProperty("os.name");
final String osVersion = System.getProperty("os.version");
diff --git a/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/BwcVersionsSpec.groovy b/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/BwcVersionsSpec.groovy
index 9c7d20d84a67..4d033564a42b 100644
--- a/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/BwcVersionsSpec.groovy
+++ b/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/BwcVersionsSpec.groovy
@@ -17,8 +17,9 @@ import org.elasticsearch.gradle.internal.BwcVersions.UnreleasedVersionInfo
class BwcVersionsSpec extends Specification {
List<String> versionLines = []
- def "current version is next minor with next major and last minor both staged"() {
+ def "current version is next major"() {
given:
+ addVersion('7.17.10', '8.9.0')
addVersion('8.14.0', '9.9.0')
addVersion('8.14.1', '9.9.0')
addVersion('8.14.2', '9.9.0')
@@ -29,25 +30,25 @@ class BwcVersionsSpec extends Specification {
addVersion('8.16.1', '9.10.0')
addVersion('8.17.0', '9.10.0')
addVersion('9.0.0', '10.0.0')
- addVersion('9.1.0', '10.1.0')
when:
- def bwc = new BwcVersions(versionLines, v('9.1.0'))
+ def bwc = new BwcVersions(versionLines, v('9.0.0'), ['main', '8.x', '8.16', '8.15', '7.17'])
def unreleased = bwc.unreleased.collectEntries { [it, bwc.unreleasedInfo(it)] }
then:
unreleased == [
+ (v('8.15.2')): new UnreleasedVersionInfo(v('8.15.2'), '8.15', ':distribution:bwc:bugfix2'),
(v('8.16.1')): new UnreleasedVersionInfo(v('8.16.1'), '8.16', ':distribution:bwc:bugfix'),
- (v('8.17.0')): new UnreleasedVersionInfo(v('8.17.0'), '8.17', ':distribution:bwc:staged'),
- (v('9.0.0')): new UnreleasedVersionInfo(v('9.0.0'), '9.x', ':distribution:bwc:minor'),
- (v('9.1.0')): new UnreleasedVersionInfo(v('9.1.0'), 'main', ':distribution')
+ (v('8.17.0')): new UnreleasedVersionInfo(v('8.17.0'), '8.x', ':distribution:bwc:minor'),
+ (v('9.0.0')): new UnreleasedVersionInfo(v('9.0.0'), 'main', ':distribution'),
]
- bwc.wireCompatible == [v('8.17.0'), v('9.0.0'), v('9.1.0')]
- bwc.indexCompatible == [v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2'), v('8.16.0'), v('8.16.1'), v('8.17.0'), v('9.0.0'), v('9.1.0')]
+ bwc.wireCompatible == [v('8.17.0'), v('9.0.0')]
+ bwc.indexCompatible == [v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2'), v('8.16.0'), v('8.16.1'), v('8.17.0'), v('9.0.0')]
}
- def "current is next minor with upcoming minor staged"() {
+ def "current version is next major with staged minor"() {
given:
+ addVersion('7.17.10', '8.9.0')
addVersion('8.14.0', '9.9.0')
addVersion('8.14.1', '9.9.0')
addVersion('8.14.2', '9.9.0')
@@ -57,53 +58,106 @@ class BwcVersionsSpec extends Specification {
addVersion('8.16.0', '9.10.0')
addVersion('8.16.1', '9.10.0')
addVersion('8.17.0', '9.10.0')
- addVersion('8.17.1', '9.10.0')
+ addVersion('8.18.0', '9.10.0')
addVersion('9.0.0', '10.0.0')
- addVersion('9.1.0', '10.1.0')
when:
- def bwc = new BwcVersions(versionLines, v('9.1.0'))
+ def bwc = new BwcVersions(versionLines, v('9.0.0'), ['main', '8.x', '8.17', '8.16', '8.15', '7.17'])
def unreleased = bwc.unreleased.collectEntries { [it, bwc.unreleasedInfo(it)] }
then:
unreleased == [
- (v('8.17.1')): new UnreleasedVersionInfo(v('8.17.1'), '8.17', ':distribution:bwc:bugfix'),
+ (v('8.15.2')): new UnreleasedVersionInfo(v('8.15.2'), '8.15', ':distribution:bwc:bugfix2'),
+ (v('8.16.1')): new UnreleasedVersionInfo(v('8.16.1'), '8.16', ':distribution:bwc:bugfix'),
+ (v('8.17.0')): new UnreleasedVersionInfo(v('8.17.0'), '8.17', ':distribution:bwc:staged'),
+ (v('8.18.0')): new UnreleasedVersionInfo(v('8.18.0'), '8.x', ':distribution:bwc:minor'),
+ (v('9.0.0')): new UnreleasedVersionInfo(v('9.0.0'), 'main', ':distribution'),
+ ]
+ bwc.wireCompatible == [v('8.18.0'), v('9.0.0')]
+ bwc.indexCompatible == [v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2'), v('8.16.0'), v('8.16.1'), v('8.17.0'), v('8.18.0'), v('9.0.0')]
+ }
+
+ def "current version is first new minor in major series"() {
+ given:
+ addVersion('7.17.10', '8.9.0')
+ addVersion('8.16.0', '9.10.0')
+ addVersion('8.16.1', '9.10.0')
+ addVersion('8.17.0', '9.10.0')
+ addVersion('8.18.0', '9.10.0')
+ addVersion('9.0.0', '10.0.0')
+ addVersion('9.1.0', '10.0.0')
+
+ when:
+ def bwc = new BwcVersions(versionLines, v('9.1.0'), ['main', '9.0', '8.18'])
+ def unreleased = bwc.unreleased.collectEntries { [it, bwc.unreleasedInfo(it)] }
+
+ then:
+ unreleased == [
+ (v('8.18.0')): new UnreleasedVersionInfo(v('8.18.0'), '8.18', ':distribution:bwc:maintenance'),
(v('9.0.0')): new UnreleasedVersionInfo(v('9.0.0'), '9.0', ':distribution:bwc:staged'),
- (v('9.1.0')): new UnreleasedVersionInfo(v('9.1.0'), 'main', ':distribution')
+ (v('9.1.0')): new UnreleasedVersionInfo(v('9.1.0'), 'main', ':distribution'),
]
- bwc.wireCompatible == [v('8.17.0'), v('8.17.1'), v('9.0.0'), v('9.1.0')]
- bwc.indexCompatible == [v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2'), v('8.16.0'), v('8.16.1'), v('8.17.0'), v('8.17.1'), v('9.0.0'), v('9.1.0')]
+ bwc.wireCompatible == [v('8.18.0'), v('9.0.0'), v('9.1.0')]
+ bwc.indexCompatible == [v('8.16.0'), v('8.16.1'), v('8.17.0'), v('8.18.0'), v('9.0.0'), v('9.1.0')]
}
- def "current version is staged major"() {
+ def "current version is new minor with single bugfix"() {
given:
- addVersion('8.14.0', '9.9.0')
- addVersion('8.14.1', '9.9.0')
- addVersion('8.14.2', '9.9.0')
- addVersion('8.15.0', '9.9.0')
- addVersion('8.15.1', '9.9.0')
- addVersion('8.15.2', '9.9.0')
+ addVersion('7.17.10', '8.9.0')
addVersion('8.16.0', '9.10.0')
addVersion('8.16.1', '9.10.0')
addVersion('8.17.0', '9.10.0')
- addVersion('8.17.1', '9.10.0')
+ addVersion('8.18.0', '9.10.0')
addVersion('9.0.0', '10.0.0')
+ addVersion('9.0.1', '10.0.0')
+ addVersion('9.1.0', '10.0.0')
when:
- def bwc = new BwcVersions(versionLines, v('9.0.0'))
+ def bwc = new BwcVersions(versionLines, v('9.1.0'), ['main', '9.0', '8.18'])
def unreleased = bwc.unreleased.collectEntries { [it, bwc.unreleasedInfo(it)] }
then:
unreleased == [
- (v('8.17.1')): new UnreleasedVersionInfo(v('8.17.1'), '8.17', ':distribution:bwc:bugfix'),
- (v('9.0.0')): new UnreleasedVersionInfo(v('9.0.0'), 'main', ':distribution'),
+ (v('8.18.0')): new UnreleasedVersionInfo(v('8.18.0'), '8.18', ':distribution:bwc:maintenance'),
+ (v('9.0.1')): new UnreleasedVersionInfo(v('9.0.1'), '9.0', ':distribution:bwc:bugfix'),
+ (v('9.1.0')): new UnreleasedVersionInfo(v('9.1.0'), 'main', ':distribution'),
]
- bwc.wireCompatible == [v('8.17.0'), v('8.17.1'), v('9.0.0')]
- bwc.indexCompatible == [v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2'), v('8.16.0'), v('8.16.1'), v('8.17.0'), v('8.17.1'), v('9.0.0')]
+ bwc.wireCompatible == [v('8.18.0'), v('9.0.0'), v('9.0.1'), v('9.1.0')]
+ bwc.indexCompatible == [v('8.16.0'), v('8.16.1'), v('8.17.0'), v('8.18.0'), v('9.0.0'), v('9.0.1'), v('9.1.0')]
}
- def "current version is major with unreleased next minor"() {
+ def "current version is new minor with single bugfix and staged minor"() {
given:
+ addVersion('7.17.10', '8.9.0')
+ addVersion('8.16.0', '9.10.0')
+ addVersion('8.16.1', '9.10.0')
+ addVersion('8.17.0', '9.10.0')
+ addVersion('8.18.0', '9.10.0')
+ addVersion('9.0.0', '10.0.0')
+ addVersion('9.0.1', '10.0.0')
+ addVersion('9.1.0', '10.0.0')
+ addVersion('9.2.0', '10.0.0')
+
+ when:
+ def bwc = new BwcVersions(versionLines, v('9.2.0'), ['main', '9.1', '9.0', '8.18'])
+ def unreleased = bwc.unreleased.collectEntries { [it, bwc.unreleasedInfo(it)] }
+
+ then:
+ unreleased == [
+ (v('8.18.0')): new UnreleasedVersionInfo(v('8.18.0'), '8.18', ':distribution:bwc:maintenance'),
+ (v('9.0.1')): new UnreleasedVersionInfo(v('9.0.1'), '9.0', ':distribution:bwc:bugfix'),
+ (v('9.1.0')): new UnreleasedVersionInfo(v('9.1.0'), '9.1', ':distribution:bwc:staged'),
+ (v('9.2.0')): new UnreleasedVersionInfo(v('9.2.0'), 'main', ':distribution'),
+ ]
+ bwc.wireCompatible == [v('8.18.0'), v('9.0.0'), v('9.0.1'), v('9.1.0'), v('9.2.0')]
+ bwc.indexCompatible == [v('8.16.0'), v('8.16.1'), v('8.17.0'), v('8.18.0'), v('9.0.0'), v('9.0.1'), v('9.1.0'), v('9.2.0')]
+ }
+
+ def "current version is next minor"() {
+ given:
+ addVersion('7.16.3', '8.9.0')
+ addVersion('7.17.0', '8.9.0')
+ addVersion('7.17.1', '8.9.0')
addVersion('8.14.0', '9.9.0')
addVersion('8.14.1', '9.9.0')
addVersion('8.14.2', '9.9.0')
@@ -113,24 +167,29 @@ class BwcVersionsSpec extends Specification {
addVersion('8.16.0', '9.10.0')
addVersion('8.16.1', '9.10.0')
addVersion('8.17.0', '9.10.0')
- addVersion('9.0.0', '10.0.0')
+ addVersion('8.17.1', '9.10.0')
+ addVersion('8.18.0', '9.10.0')
when:
- def bwc = new BwcVersions(versionLines, v('9.0.0'))
+ def bwc = new BwcVersions(versionLines, v('8.18.0'), ['main', '8.x', '8.17', '8.16', '7.17'])
def unreleased = bwc.unreleased.collectEntries { [it, bwc.unreleasedInfo(it)] }
then:
unreleased == [
- (v('8.16.1')): new UnreleasedVersionInfo(v('8.16.1'), '8.16', ':distribution:bwc:bugfix'),
- (v('8.17.0')): new UnreleasedVersionInfo(v('8.17.0'), '8.x', ':distribution:bwc:minor'),
- (v('9.0.0')): new UnreleasedVersionInfo(v('9.0.0'), 'main', ':distribution'),
+ (v('7.17.1')): new UnreleasedVersionInfo(v('7.17.1'), '7.17', ':distribution:bwc:maintenance'),
+ (v('8.16.1')): new UnreleasedVersionInfo(v('8.16.1'), '8.16', ':distribution:bwc:bugfix2'),
+ (v('8.17.1')): new UnreleasedVersionInfo(v('8.17.1'), '8.17', ':distribution:bwc:bugfix'),
+ (v('8.18.0')): new UnreleasedVersionInfo(v('8.18.0'), '8.x', ':distribution'),
]
- bwc.wireCompatible == [v('8.17.0'), v('9.0.0')]
- bwc.indexCompatible == [v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2'), v('8.16.0'), v('8.16.1'), v('8.17.0'), v('9.0.0')]
+ bwc.wireCompatible == [v('7.17.0'), v('7.17.1'), v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2'), v('8.16.0'), v('8.16.1'), v('8.17.0'), v('8.17.1'), v('8.18.0')]
+ bwc.indexCompatible == [v('7.16.3'), v('7.17.0'), v('7.17.1'), v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2'), v('8.16.0'), v('8.16.1'), v('8.17.0'), v('8.17.1'), v('8.18.0')]
}
- def "current version is major with staged next minor"() {
+ def "current version is new minor with staged minor"() {
given:
+ addVersion('7.16.3', '8.9.0')
+ addVersion('7.17.0', '8.9.0')
+ addVersion('7.17.1', '8.9.0')
addVersion('8.14.0', '9.9.0')
addVersion('8.14.1', '9.9.0')
addVersion('8.14.2', '9.9.0')
@@ -138,26 +197,31 @@ class BwcVersionsSpec extends Specification {
addVersion('8.15.1', '9.9.0')
addVersion('8.15.2', '9.9.0')
addVersion('8.16.0', '9.10.0')
+ addVersion('8.16.1', '9.10.0')
addVersion('8.17.0', '9.10.0')
- addVersion('9.0.0', '10.0.0')
+ addVersion('8.18.0', '9.10.0')
when:
- def bwc = new BwcVersions(versionLines, v('9.0.0'))
+ def bwc = new BwcVersions(versionLines, v('8.18.0'), ['main', '8.x', '8.17', '8.16', '8.15', '7.17'])
def unreleased = bwc.unreleased.collectEntries { [it, bwc.unreleasedInfo(it)] }
then:
unreleased == [
- (v('8.15.2')): new UnreleasedVersionInfo(v('8.15.2'), '8.15', ':distribution:bwc:bugfix'),
- (v('8.16.0')): new UnreleasedVersionInfo(v('8.16.0'), '8.16', ':distribution:bwc:staged'),
- (v('8.17.0')): new UnreleasedVersionInfo(v('8.17.0'), '8.x', ':distribution:bwc:minor'),
- (v('9.0.0')): new UnreleasedVersionInfo(v('9.0.0'), 'main', ':distribution'),
+ (v('7.17.1')): new UnreleasedVersionInfo(v('7.17.1'), '7.17', ':distribution:bwc:maintenance'),
+ (v('8.15.2')): new UnreleasedVersionInfo(v('8.15.2'), '8.15', ':distribution:bwc:bugfix2'),
+ (v('8.16.1')): new UnreleasedVersionInfo(v('8.16.1'), '8.16', ':distribution:bwc:bugfix'),
+ (v('8.17.0')): new UnreleasedVersionInfo(v('8.17.0'), '8.17', ':distribution:bwc:staged'),
+ (v('8.18.0')): new UnreleasedVersionInfo(v('8.18.0'), '8.x', ':distribution'),
]
- bwc.wireCompatible == [v('8.17.0'), v('9.0.0')]
- bwc.indexCompatible == [v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2'), v('8.16.0'), v('8.17.0'), v('9.0.0')]
+ bwc.wireCompatible == [v('7.17.0'), v('7.17.1'), v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2'), v('8.16.0'), v('8.16.1'), v('8.17.0'), v('8.18.0')]
+ bwc.indexCompatible == [v('7.16.3'), v('7.17.0'), v('7.17.1'), v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2'), v('8.16.0'), v('8.16.1'), v('8.17.0'), v('8.18.0')]
}
- def "current version is next bugfix"() {
+ def "current version is first bugfix"() {
given:
+ addVersion('7.16.3', '8.9.0')
+ addVersion('7.17.0', '8.9.0')
+ addVersion('7.17.1', '8.9.0')
addVersion('8.14.0', '9.9.0')
addVersion('8.14.1', '9.9.0')
addVersion('8.14.2', '9.9.0')
@@ -166,52 +230,44 @@ class BwcVersionsSpec extends Specification {
addVersion('8.15.2', '9.9.0')
addVersion('8.16.0', '9.10.0')
addVersion('8.16.1', '9.10.0')
- addVersion('8.17.0', '9.10.0')
- addVersion('8.17.1', '9.10.0')
- addVersion('9.0.0', '10.0.0')
- addVersion('9.0.1', '10.0.0')
when:
- def bwc = new BwcVersions(versionLines, v('9.0.1'))
+ def bwc = new BwcVersions(versionLines, v('8.16.1'), ['main', '8.x', '8.17', '8.16', '8.15', '7.17'])
def unreleased = bwc.unreleased.collectEntries { [it, bwc.unreleasedInfo(it)] }
then:
unreleased == [
- (v('8.17.1')): new UnreleasedVersionInfo(v('8.17.1'), '8.17', ':distribution:bwc:maintenance'),
- (v('9.0.1')): new UnreleasedVersionInfo(v('9.0.1'), 'main', ':distribution'),
+ (v('7.17.1')): new UnreleasedVersionInfo(v('7.17.1'), '7.17', ':distribution:bwc:maintenance'),
+ (v('8.15.2')): new UnreleasedVersionInfo(v('8.15.2'), '8.15', ':distribution:bwc:bugfix'),
+ (v('8.16.1')): new UnreleasedVersionInfo(v('8.16.1'), '8.16', ':distribution'),
]
- bwc.wireCompatible == [v('8.17.0'), v('8.17.1'), v('9.0.0'), v('9.0.1')]
- bwc.indexCompatible == [v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2'), v('8.16.0'), v('8.16.1'), v('8.17.0'), v('8.17.1'), v('9.0.0'), v('9.0.1')]
+ bwc.wireCompatible == [v('7.17.0'), v('7.17.1'), v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2'), v('8.16.0'), v('8.16.1')]
+ bwc.indexCompatible == [v('7.16.3'), v('7.17.0'), v('7.17.1'), v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2'), v('8.16.0'), v('8.16.1')]
}
- def "current version is next minor with no staged releases"() {
+ def "current version is second bugfix"() {
given:
+ addVersion('7.16.3', '8.9.0')
+ addVersion('7.17.0', '8.9.0')
+ addVersion('7.17.1', '8.9.0')
addVersion('8.14.0', '9.9.0')
addVersion('8.14.1', '9.9.0')
addVersion('8.14.2', '9.9.0')
addVersion('8.15.0', '9.9.0')
addVersion('8.15.1', '9.9.0')
addVersion('8.15.2', '9.9.0')
- addVersion('8.16.0', '9.10.0')
- addVersion('8.16.1', '9.10.0')
- addVersion('8.17.0', '9.10.0')
- addVersion('8.17.1', '9.10.0')
- addVersion('9.0.0', '10.0.0')
- addVersion('9.0.1', '10.0.0')
- addVersion('9.1.0', '10.1.0')
when:
- def bwc = new BwcVersions(versionLines, v('9.1.0'))
+ def bwc = new BwcVersions(versionLines, v('8.15.2'), ['main', '8.x', '8.17', '8.16', '8.15', '7.17'])
def unreleased = bwc.unreleased.collectEntries { [it, bwc.unreleasedInfo(it)] }
then:
unreleased == [
- (v('8.17.1')): new UnreleasedVersionInfo(v('8.17.1'), '8.17', ':distribution:bwc:maintenance'),
- (v('9.0.1')): new UnreleasedVersionInfo(v('9.0.1'), '9.0', ':distribution:bwc:bugfix'),
- (v('9.1.0')): new UnreleasedVersionInfo(v('9.1.0'), 'main', ':distribution')
+ (v('7.17.1')): new UnreleasedVersionInfo(v('7.17.1'), '7.17', ':distribution:bwc:maintenance'),
+ (v('8.15.2')): new UnreleasedVersionInfo(v('8.15.2'), '8.15', ':distribution'),
]
- bwc.wireCompatible == [v('8.17.0'), v('8.17.1'), v('9.0.0'), v('9.0.1'), v('9.1.0')]
- bwc.indexCompatible == [v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2'), v('8.16.0'), v('8.16.1'), v('8.17.0'), v('8.17.1'), v('9.0.0'), v('9.0.1'), v('9.1.0')]
+ bwc.wireCompatible == [v('7.17.0'), v('7.17.1'), v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2')]
+ bwc.indexCompatible == [v('7.16.3'), v('7.17.0'), v('7.17.1'), v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2')]
}
private void addVersion(String elasticsearch, String lucene) {
diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/AbstractDistributionDownloadPluginTests.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/AbstractDistributionDownloadPluginTests.java
index 639dec280ae9..7512fa20814c 100644
--- a/build-tools-internal/src/test/java/org/elasticsearch/gradle/AbstractDistributionDownloadPluginTests.java
+++ b/build-tools-internal/src/test/java/org/elasticsearch/gradle/AbstractDistributionDownloadPluginTests.java
@@ -16,6 +16,7 @@
import java.io.File;
import java.util.Arrays;
+import java.util.List;
public class AbstractDistributionDownloadPluginTests {
protected static Project rootProject;
@@ -28,22 +29,27 @@ public class AbstractDistributionDownloadPluginTests {
protected static final Version BWC_STAGED_VERSION = Version.fromString("1.0.0");
protected static final Version BWC_BUGFIX_VERSION = Version.fromString("1.0.1");
protected static final Version BWC_MAINTENANCE_VERSION = Version.fromString("0.90.1");
+ protected static final List<String> DEVELOPMENT_BRANCHES = Arrays.asList("main", "1.1", "1.0", "0.90");
protected static final BwcVersions BWC_MINOR = new BwcVersions(
BWC_MAJOR_VERSION,
- Arrays.asList(BWC_BUGFIX_VERSION, BWC_MINOR_VERSION, BWC_MAJOR_VERSION)
+ Arrays.asList(BWC_BUGFIX_VERSION, BWC_MINOR_VERSION, BWC_MAJOR_VERSION),
+ DEVELOPMENT_BRANCHES
);
protected static final BwcVersions BWC_STAGED = new BwcVersions(
BWC_MAJOR_VERSION,
- Arrays.asList(BWC_MAINTENANCE_VERSION, BWC_STAGED_VERSION, BWC_MINOR_VERSION, BWC_MAJOR_VERSION)
+ Arrays.asList(BWC_MAINTENANCE_VERSION, BWC_STAGED_VERSION, BWC_MINOR_VERSION, BWC_MAJOR_VERSION),
+ DEVELOPMENT_BRANCHES
);
protected static final BwcVersions BWC_BUGFIX = new BwcVersions(
BWC_MAJOR_VERSION,
- Arrays.asList(BWC_BUGFIX_VERSION, BWC_MINOR_VERSION, BWC_MAJOR_VERSION)
+ Arrays.asList(BWC_BUGFIX_VERSION, BWC_MINOR_VERSION, BWC_MAJOR_VERSION),
+ DEVELOPMENT_BRANCHES
);
protected static final BwcVersions BWC_MAINTENANCE = new BwcVersions(
BWC_MINOR_VERSION,
- Arrays.asList(BWC_MAINTENANCE_VERSION, BWC_BUGFIX_VERSION, BWC_MINOR_VERSION)
+ Arrays.asList(BWC_MAINTENANCE_VERSION, BWC_BUGFIX_VERSION, BWC_MINOR_VERSION),
+ DEVELOPMENT_BRANCHES
);
protected static String projectName(String base, boolean bundledJdk) {
diff --git a/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy b/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy
index f3f8e4703eba..07214b5fbf84 100644
--- a/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy
+++ b/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy
@@ -156,12 +156,12 @@ abstract class AbstractGradleFuncTest extends Specification {
File internalBuild(
List extraPlugins = [],
- String bugfix = "7.15.2",
- String bugfixLucene = "8.9.0",
- String staged = "7.16.0",
- String stagedLucene = "8.10.0",
- String minor = "8.0.0",
- String minorLucene = "9.0.0"
+ String maintenance = "7.16.10",
+ String bugfix2 = "8.1.3",
+ String bugfix = "8.2.1",
+ String staged = "8.3.0",
+ String minor = "8.4.0",
+ String current = "9.0.0"
) {
buildFile << """plugins {
id 'elasticsearch.global-build-info'
@@ -172,15 +172,17 @@ abstract class AbstractGradleFuncTest extends Specification {
import org.elasticsearch.gradle.internal.BwcVersions
import org.elasticsearch.gradle.Version
- Version currentVersion = Version.fromString("8.1.0")
+ Version currentVersion = Version.fromString("${current}")
def versionList = [
+ Version.fromString("$maintenance"),
+ Version.fromString("$bugfix2"),
Version.fromString("$bugfix"),
Version.fromString("$staged"),
Version.fromString("$minor"),
currentVersion
]
- BwcVersions versions = new BwcVersions(currentVersion, versionList)
+ BwcVersions versions = new BwcVersions(currentVersion, versionList, ['main', '8.x', '8.3', '8.2', '8.1', '7.16'])
buildParams.getBwcVersionsProperty().set(versions)
"""
}
diff --git a/distribution/bwc/bugfix2/build.gradle b/distribution/bwc/bugfix2/build.gradle
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/settings.gradle b/settings.gradle
index 4722fc311480..747fbb3e439f 100644
--- a/settings.gradle
+++ b/settings.gradle
@@ -73,6 +73,7 @@ List projects = [
'distribution:packages:aarch64-rpm',
'distribution:packages:rpm',
'distribution:bwc:bugfix',
+ 'distribution:bwc:bugfix2',
'distribution:bwc:maintenance',
'distribution:bwc:minor',
'distribution:bwc:staged',
From 584918e39d5f436a20f010163a3ae44fa99046ca Mon Sep 17 00:00:00 2001
From: Armin Braun
Date: Thu, 5 Dec 2024 20:27:42 +0100
Subject: [PATCH 13/26] Save duplicate REST client in ESRestTestCase (#117910)
I debugged some tests today and noticed that these two clients
are the same in almost all cases, so there is no need to use extra connections.
Might give us a small speedup for these tests, which tend to be quite slow
relative to the node-client-based tests.
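As a hedged illustration (not part of the diff below): a test subclass that overrides restAdminSettings(), for example to send different credentials for admin requests, still gets a dedicated admin client, because the settings-equality check fails. Class name and credential value here are illustrative only; restAdminSettings() and Settings are the existing test-framework hooks:
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.test.rest.ESRestTestCase;
public class ExampleAdminCredentialsIT extends ESRestTestCase {
    // These settings differ from restClientSettings(), so initClient() will still
    // build a second, admin-only REST client even after this change.
    @Override
    protected Settings restAdminSettings() {
        return Settings.builder()
            .put(ThreadContext.PREFIX + ".Authorization", "Basic <base64 admin credentials>")
            .build();
    }
}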
---
.../java/org/elasticsearch/test/rest/ESRestTestCase.java | 7 +++++--
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java
index b4f4243fb90f..4428afaaeabe 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java
@@ -333,8 +333,11 @@ public void initClient() throws IOException {
assert testFeatureServiceInitialized() == false;
clusterHosts = parseClusterHosts(getTestRestCluster());
logger.info("initializing REST clients against {}", clusterHosts);
- client = buildClient(restClientSettings(), clusterHosts.toArray(new HttpHost[clusterHosts.size()]));
- adminClient = buildClient(restAdminSettings(), clusterHosts.toArray(new HttpHost[clusterHosts.size()]));
+ var clientSettings = restClientSettings();
+ var adminSettings = restAdminSettings();
+ var hosts = clusterHosts.toArray(new HttpHost[0]);
+ client = buildClient(clientSettings, hosts);
+ adminClient = clientSettings.equals(adminSettings) ? client : buildClient(adminSettings, hosts);
availableFeatures = EnumSet.of(ProductFeature.LEGACY_TEMPLATES);
Set versions = new HashSet<>();
From fd81c5111878d4cdbbf299976377a3fffd41cb29 Mon Sep 17 00:00:00 2001
From: Sam Xiao
Date: Thu, 5 Dec 2024 14:32:25 -0500
Subject: [PATCH 14/26] Unmute BWC tests FullClusterRestartIT (#118038)
---
muted-tests.yml | 6 ------
1 file changed, 6 deletions(-)
diff --git a/muted-tests.yml b/muted-tests.yml
index a09e46415fdc..ee5e3dd42236 100644
--- a/muted-tests.yml
+++ b/muted-tests.yml
@@ -2,12 +2,6 @@ tests:
- class: "org.elasticsearch.client.RestClientSingleHostIntegTests"
issue: "https://github.com/elastic/elasticsearch/issues/102717"
method: "testRequestResetAndAbort"
-- class: org.elasticsearch.xpack.restart.FullClusterRestartIT
- method: testSingleDoc {cluster=UPGRADED}
- issue: https://github.com/elastic/elasticsearch/issues/111434
-- class: org.elasticsearch.xpack.restart.FullClusterRestartIT
- method: testDataStreams {cluster=UPGRADED}
- issue: https://github.com/elastic/elasticsearch/issues/111448
- class: org.elasticsearch.smoketest.WatcherYamlRestIT
method: test {p0=watcher/usage/10_basic/Test watcher usage stats output}
issue: https://github.com/elastic/elasticsearch/issues/112189
From 62d94f2920d4e315bcd2867b791022e8a4c33b9f Mon Sep 17 00:00:00 2001
From: Mark Vieira
Date: Thu, 5 Dec 2024 13:41:43 -0800
Subject: [PATCH 15/26] Remove released vs unreleased distinction from
VersionUtils (#118108)
---
.../java/org/elasticsearch/VersionTests.java | 41 +--
test/framework/build.gradle | 1 -
.../org/elasticsearch/test/VersionUtils.java | 139 +--------
.../elasticsearch/test/VersionUtilsTests.java | 273 ++----------------
4 files changed, 30 insertions(+), 424 deletions(-)
diff --git a/server/src/test/java/org/elasticsearch/VersionTests.java b/server/src/test/java/org/elasticsearch/VersionTests.java
index 0b35a3cc23c1..5e10a7d37aea 100644
--- a/server/src/test/java/org/elasticsearch/VersionTests.java
+++ b/server/src/test/java/org/elasticsearch/VersionTests.java
@@ -179,8 +179,7 @@ public void testParseVersion() {
}
public void testAllVersionsMatchId() throws Exception {
- final Set releasedVersions = new HashSet<>(VersionUtils.allReleasedVersions());
- final Set unreleasedVersions = new HashSet<>(VersionUtils.allUnreleasedVersions());
+ final Set<Version> versions = new HashSet<>(VersionUtils.allVersions());
Map maxBranchVersions = new HashMap<>();
for (java.lang.reflect.Field field : Version.class.getFields()) {
if (field.getName().matches("_ID")) {
@@ -195,43 +194,15 @@ public void testAllVersionsMatchId() throws Exception {
Version v = (Version) versionConstant.get(null);
logger.debug("Checking {}", v);
- if (field.getName().endsWith("_UNRELEASED")) {
- assertTrue(unreleasedVersions.contains(v));
- } else {
- assertTrue(releasedVersions.contains(v));
- }
+ assertTrue(versions.contains(v));
assertEquals("Version id " + field.getName() + " does not point to " + constantName, v, Version.fromId(versionId));
assertEquals("Version " + constantName + " does not have correct id", versionId, v.id);
String number = v.toString();
assertEquals("V_" + number.replace('.', '_'), constantName);
-
- // only the latest version for a branch should be a snapshot (ie unreleased)
- String branchName = "" + v.major + "." + v.minor;
- Version maxBranchVersion = maxBranchVersions.get(branchName);
- if (maxBranchVersion == null) {
- maxBranchVersions.put(branchName, v);
- } else if (v.after(maxBranchVersion)) {
- if (v == Version.CURRENT) {
- // Current is weird - it counts as released even though it shouldn't.
- continue;
- }
- assertFalse(
- "Version " + maxBranchVersion + " cannot be a snapshot because version " + v + " exists",
- VersionUtils.allUnreleasedVersions().contains(maxBranchVersion)
- );
- maxBranchVersions.put(branchName, v);
- }
}
}
}
- public static void assertUnknownVersion(Version version) {
- assertFalse(
- "Version " + version + " has been releaed don't use a new instance of this version",
- VersionUtils.allReleasedVersions().contains(version)
- );
- }
-
public void testIsCompatible() {
assertTrue(isCompatible(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion()));
assertFalse(isCompatible(Version.V_7_0_0, Version.V_8_0_0));
@@ -279,14 +250,6 @@ public boolean isCompatible(Version left, Version right) {
return result;
}
- // This exists because 5.1.0 was never released due to a mistake in the release process.
- // This verifies that we never declare the version as "released" accidentally.
- // It would never pass qa tests later on, but those come very far in the build and this is quick to check now.
- public void testUnreleasedVersion() {
- Version VERSION_5_1_0_UNRELEASED = Version.fromString("5.1.0");
- VersionTests.assertUnknownVersion(VERSION_5_1_0_UNRELEASED);
- }
-
public void testIllegalMinorAndPatchNumbers() {
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> Version.fromString("8.2.999"));
assertThat(
diff --git a/test/framework/build.gradle b/test/framework/build.gradle
index 126b95041da1..c7e08eb3cdfa 100644
--- a/test/framework/build.gradle
+++ b/test/framework/build.gradle
@@ -86,7 +86,6 @@ tasks.named("thirdPartyAudit").configure {
tasks.named("test").configure {
systemProperty 'tests.gradle_index_compat_versions', buildParams.bwcVersions.indexCompatible.join(',')
systemProperty 'tests.gradle_wire_compat_versions', buildParams.bwcVersions.wireCompatible.join(',')
- systemProperty 'tests.gradle_unreleased_versions', buildParams.bwcVersions.unreleased.join(',')
}
tasks.register("integTest", Test) {
diff --git a/test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java b/test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java
index d561c5512b61..8b7ab620774b 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java
@@ -12,132 +12,15 @@
import org.elasticsearch.Build;
import org.elasticsearch.Version;
import org.elasticsearch.core.Nullable;
-import org.elasticsearch.core.Tuple;
-import java.util.ArrayList;
-import java.util.Collections;
import java.util.List;
-import java.util.Map;
import java.util.Optional;
import java.util.Random;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
/** Utilities for selecting versions in tests */
public class VersionUtils {
- /**
- * Sort versions that have backwards compatibility guarantees from
- * those that don't. Doesn't actually check whether or not the versions
- * are released, instead it relies on gradle to have already checked
- * this which it does in {@code :core:verifyVersions}. So long as the
- * rules here match up with the rules in gradle then this should
- * produce sensible results.
- * @return a tuple containing versions with backwards compatibility
- * guarantees in v1 and versions without the guranteees in v2
- */
- static Tuple, List> resolveReleasedVersions(Version current, Class> versionClass) {
- // group versions into major version
- Map> majorVersions = Version.getDeclaredVersions(versionClass)
- .stream()
- .collect(Collectors.groupingBy(v -> (int) v.major));
- // this breaks b/c 5.x is still in version list but master doesn't care about it!
- // assert majorVersions.size() == 2;
- // TODO: remove oldVersions, we should only ever have 2 majors in Version
- List> oldVersions = splitByMinor(majorVersions.getOrDefault((int) current.major - 2, Collections.emptyList()));
- List> previousMajor = splitByMinor(majorVersions.get((int) current.major - 1));
- List> currentMajor = splitByMinor(majorVersions.get((int) current.major));
-
- List unreleasedVersions = new ArrayList<>();
- final List> stableVersions;
- if (currentMajor.size() == 1) {
- // on master branch
- stableVersions = previousMajor;
- // remove current
- moveLastToUnreleased(currentMajor, unreleasedVersions);
- } else {
- // on a stable or release branch, ie N.x
- stableVersions = currentMajor;
- // remove the next maintenance bugfix
- moveLastToUnreleased(previousMajor, unreleasedVersions);
- }
-
- // remove next minor
- Version lastMinor = moveLastToUnreleased(stableVersions, unreleasedVersions);
- if (lastMinor.revision == 0) {
- if (stableVersions.get(stableVersions.size() - 1).size() == 1) {
- // a minor is being staged, which is also unreleased
- moveLastToUnreleased(stableVersions, unreleasedVersions);
- }
- // remove the next bugfix
- if (stableVersions.isEmpty() == false) {
- moveLastToUnreleased(stableVersions, unreleasedVersions);
- }
- }
-
- // If none of the previous major was released, then the last minor and bugfix of the old version was not released either.
- if (previousMajor.isEmpty()) {
- assert currentMajor.isEmpty() : currentMajor;
- // minor of the old version is being staged
- moveLastToUnreleased(oldVersions, unreleasedVersions);
- // bugix of the old version is also being staged
- moveLastToUnreleased(oldVersions, unreleasedVersions);
- }
- List releasedVersions = Stream.of(oldVersions, previousMajor, currentMajor)
- .flatMap(List::stream)
- .flatMap(List::stream)
- .collect(Collectors.toList());
- Collections.sort(unreleasedVersions); // we add unreleased out of order, so need to sort here
- return new Tuple<>(Collections.unmodifiableList(releasedVersions), Collections.unmodifiableList(unreleasedVersions));
- }
-
- // split the given versions into sub lists grouped by minor version
- private static List> splitByMinor(List versions) {
- Map> byMinor = versions.stream().collect(Collectors.groupingBy(v -> (int) v.minor));
- return byMinor.entrySet().stream().sorted(Map.Entry.comparingByKey()).map(Map.Entry::getValue).collect(Collectors.toList());
- }
-
- // move the last version of the last minor in versions to the unreleased versions
- private static Version moveLastToUnreleased(List> versions, List unreleasedVersions) {
- List lastMinor = new ArrayList<>(versions.get(versions.size() - 1));
- Version lastVersion = lastMinor.remove(lastMinor.size() - 1);
- if (lastMinor.isEmpty()) {
- versions.remove(versions.size() - 1);
- } else {
- versions.set(versions.size() - 1, lastMinor);
- }
- unreleasedVersions.add(lastVersion);
- return lastVersion;
- }
-
- private static final List RELEASED_VERSIONS;
- private static final List UNRELEASED_VERSIONS;
- private static final List ALL_VERSIONS;
-
- static {
- Tuple, List> versions = resolveReleasedVersions(Version.CURRENT, Version.class);
- RELEASED_VERSIONS = versions.v1();
- UNRELEASED_VERSIONS = versions.v2();
- List allVersions = new ArrayList<>(RELEASED_VERSIONS.size() + UNRELEASED_VERSIONS.size());
- allVersions.addAll(RELEASED_VERSIONS);
- allVersions.addAll(UNRELEASED_VERSIONS);
- Collections.sort(allVersions);
- ALL_VERSIONS = Collections.unmodifiableList(allVersions);
- }
-
- /**
- * Returns an immutable, sorted list containing all released versions.
- */
- public static List allReleasedVersions() {
- return RELEASED_VERSIONS;
- }
-
- /**
- * Returns an immutable, sorted list containing all unreleased versions.
- */
- public static List allUnreleasedVersions() {
- return UNRELEASED_VERSIONS;
- }
+ private static final List<Version> ALL_VERSIONS = Version.getDeclaredVersions(Version.class);
/**
* Returns an immutable, sorted list containing all versions, both released and unreleased.
@@ -147,16 +30,16 @@ public static List allVersions() {
}
/**
- * Get the released version before {@code version}.
+ * Get the version before {@code version}.
*/
public static Version getPreviousVersion(Version version) {
- for (int i = RELEASED_VERSIONS.size() - 1; i >= 0; i--) {
- Version v = RELEASED_VERSIONS.get(i);
+ for (int i = ALL_VERSIONS.size() - 1; i >= 0; i--) {
+ Version v = ALL_VERSIONS.get(i);
if (v.before(version)) {
return v;
}
}
- throw new IllegalArgumentException("couldn't find any released versions before [" + version + "]");
+ throw new IllegalArgumentException("couldn't find any versions before [" + version + "]");
}
/**
@@ -169,22 +52,22 @@ public static Version getPreviousVersion() {
}
/**
- * Returns the released {@link Version} before the {@link Version#CURRENT}
+ * Returns the {@link Version} before the {@link Version#CURRENT}
* where the minor version is less than the currents minor version.
*/
public static Version getPreviousMinorVersion() {
- for (int i = RELEASED_VERSIONS.size() - 1; i >= 0; i--) {
- Version v = RELEASED_VERSIONS.get(i);
+ for (int i = ALL_VERSIONS.size() - 1; i >= 0; i--) {
+ Version v = ALL_VERSIONS.get(i);
if (v.minor < Version.CURRENT.minor || v.major < Version.CURRENT.major) {
return v;
}
}
- throw new IllegalArgumentException("couldn't find any released versions of the minor before [" + Build.current().version() + "]");
+ throw new IllegalArgumentException("couldn't find any versions of the minor before [" + Build.current().version() + "]");
}
- /** Returns the oldest released {@link Version} */
+ /** Returns the oldest {@link Version} */
public static Version getFirstVersion() {
- return RELEASED_VERSIONS.get(0);
+ return ALL_VERSIONS.get(0);
}
/** Returns a random {@link Version} from all available versions. */
diff --git a/test/framework/src/test/java/org/elasticsearch/test/VersionUtilsTests.java b/test/framework/src/test/java/org/elasticsearch/test/VersionUtilsTests.java
index e0013e06f324..5ae7e5640fc9 100644
--- a/test/framework/src/test/java/org/elasticsearch/test/VersionUtilsTests.java
+++ b/test/framework/src/test/java/org/elasticsearch/test/VersionUtilsTests.java
@@ -9,19 +9,11 @@
package org.elasticsearch.test;
import org.elasticsearch.Version;
-import org.elasticsearch.core.Booleans;
-import org.elasticsearch.core.Tuple;
import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashSet;
import java.util.List;
-import java.util.Set;
import static org.elasticsearch.Version.fromId;
-import static org.hamcrest.Matchers.equalTo;
-import static org.hamcrest.Matchers.greaterThanOrEqualTo;
-import static org.hamcrest.Matchers.lessThanOrEqualTo;
/**
* Tests VersionUtils. Note: this test should remain unchanged across major versions
@@ -30,7 +22,7 @@
public class VersionUtilsTests extends ESTestCase {
public void testAllVersionsSorted() {
- List allVersions = VersionUtils.allReleasedVersions();
+ List<Version> allVersions = VersionUtils.allVersions();
for (int i = 0, j = 1; j < allVersions.size(); ++i, ++j) {
assertTrue(allVersions.get(i).before(allVersions.get(j)));
}
@@ -58,9 +50,9 @@ public void testRandomVersionBetween() {
got = VersionUtils.randomVersionBetween(random(), null, fromId(7000099));
assertTrue(got.onOrAfter(VersionUtils.getFirstVersion()));
assertTrue(got.onOrBefore(fromId(7000099)));
- got = VersionUtils.randomVersionBetween(random(), null, VersionUtils.allReleasedVersions().get(0));
+ got = VersionUtils.randomVersionBetween(random(), null, VersionUtils.allVersions().get(0));
assertTrue(got.onOrAfter(VersionUtils.getFirstVersion()));
- assertTrue(got.onOrBefore(VersionUtils.allReleasedVersions().get(0)));
+ assertTrue(got.onOrBefore(VersionUtils.allVersions().get(0)));
// unbounded upper
got = VersionUtils.randomVersionBetween(random(), VersionUtils.getFirstVersion(), null);
@@ -83,265 +75,34 @@ public void testRandomVersionBetween() {
assertEquals(got, VersionUtils.getFirstVersion());
got = VersionUtils.randomVersionBetween(random(), Version.CURRENT, null);
assertEquals(got, Version.CURRENT);
-
- if (Booleans.parseBoolean(System.getProperty("build.snapshot", "true"))) {
- // max or min can be an unreleased version
- final Version unreleased = randomFrom(VersionUtils.allUnreleasedVersions());
- assertThat(VersionUtils.randomVersionBetween(random(), null, unreleased), lessThanOrEqualTo(unreleased));
- assertThat(VersionUtils.randomVersionBetween(random(), unreleased, null), greaterThanOrEqualTo(unreleased));
- assertEquals(unreleased, VersionUtils.randomVersionBetween(random(), unreleased, unreleased));
- }
- }
-
- public static class TestReleaseBranch {
- public static final Version V_4_0_0 = Version.fromString("4.0.0");
- public static final Version V_4_0_1 = Version.fromString("4.0.1");
- public static final Version V_5_3_0 = Version.fromString("5.3.0");
- public static final Version V_5_3_1 = Version.fromString("5.3.1");
- public static final Version V_5_3_2 = Version.fromString("5.3.2");
- public static final Version V_5_4_0 = Version.fromString("5.4.0");
- public static final Version V_5_4_1 = Version.fromString("5.4.1");
- public static final Version CURRENT = V_5_4_1;
- }
-
- public void testResolveReleasedVersionsForReleaseBranch() {
- Tuple, List> t = VersionUtils.resolveReleasedVersions(TestReleaseBranch.CURRENT, TestReleaseBranch.class);
- List released = t.v1();
- List unreleased = t.v2();
-
- assertThat(
- released,
- equalTo(
- Arrays.asList(
- TestReleaseBranch.V_4_0_0,
- TestReleaseBranch.V_5_3_0,
- TestReleaseBranch.V_5_3_1,
- TestReleaseBranch.V_5_3_2,
- TestReleaseBranch.V_5_4_0
- )
- )
- );
- assertThat(unreleased, equalTo(Arrays.asList(TestReleaseBranch.V_4_0_1, TestReleaseBranch.V_5_4_1)));
- }
-
- public static class TestStableBranch {
- public static final Version V_4_0_0 = Version.fromString("4.0.0");
- public static final Version V_4_0_1 = Version.fromString("4.0.1");
- public static final Version V_5_0_0 = Version.fromString("5.0.0");
- public static final Version V_5_0_1 = Version.fromString("5.0.1");
- public static final Version V_5_0_2 = Version.fromString("5.0.2");
- public static final Version V_5_1_0 = Version.fromString("5.1.0");
- public static final Version CURRENT = V_5_1_0;
- }
-
- public void testResolveReleasedVersionsForUnreleasedStableBranch() {
- Tuple, List> t = VersionUtils.resolveReleasedVersions(TestStableBranch.CURRENT, TestStableBranch.class);
- List released = t.v1();
- List unreleased = t.v2();
-
- assertThat(released, equalTo(Arrays.asList(TestStableBranch.V_4_0_0, TestStableBranch.V_5_0_0, TestStableBranch.V_5_0_1)));
- assertThat(unreleased, equalTo(Arrays.asList(TestStableBranch.V_4_0_1, TestStableBranch.V_5_0_2, TestStableBranch.V_5_1_0)));
- }
-
- public static class TestStableBranchBehindStableBranch {
- public static final Version V_4_0_0 = Version.fromString("4.0.0");
- public static final Version V_4_0_1 = Version.fromString("4.0.1");
- public static final Version V_5_3_0 = Version.fromString("5.3.0");
- public static final Version V_5_3_1 = Version.fromString("5.3.1");
- public static final Version V_5_3_2 = Version.fromString("5.3.2");
- public static final Version V_5_4_0 = Version.fromString("5.4.0");
- public static final Version V_5_5_0 = Version.fromString("5.5.0");
- public static final Version CURRENT = V_5_5_0;
- }
-
- public void testResolveReleasedVersionsForStableBranchBehindStableBranch() {
- Tuple, List> t = VersionUtils.resolveReleasedVersions(
- TestStableBranchBehindStableBranch.CURRENT,
- TestStableBranchBehindStableBranch.class
- );
- List released = t.v1();
- List unreleased = t.v2();
-
- assertThat(
- released,
- equalTo(
- Arrays.asList(
- TestStableBranchBehindStableBranch.V_4_0_0,
- TestStableBranchBehindStableBranch.V_5_3_0,
- TestStableBranchBehindStableBranch.V_5_3_1
- )
- )
- );
- assertThat(
- unreleased,
- equalTo(
- Arrays.asList(
- TestStableBranchBehindStableBranch.V_4_0_1,
- TestStableBranchBehindStableBranch.V_5_3_2,
- TestStableBranchBehindStableBranch.V_5_4_0,
- TestStableBranchBehindStableBranch.V_5_5_0
- )
- )
- );
- }
-
- public static class TestUnstableBranch {
- public static final Version V_5_3_0 = Version.fromString("5.3.0");
- public static final Version V_5_3_1 = Version.fromString("5.3.1");
- public static final Version V_5_3_2 = Version.fromString("5.3.2");
- public static final Version V_5_4_0 = Version.fromString("5.4.0");
- public static final Version V_6_0_0 = Version.fromString("6.0.0");
- public static final Version CURRENT = V_6_0_0;
- }
-
- public void testResolveReleasedVersionsForUnstableBranch() {
- Tuple, List> t = VersionUtils.resolveReleasedVersions(TestUnstableBranch.CURRENT, TestUnstableBranch.class);
- List released = t.v1();
- List unreleased = t.v2();
-
- assertThat(released, equalTo(Arrays.asList(TestUnstableBranch.V_5_3_0, TestUnstableBranch.V_5_3_1)));
- assertThat(unreleased, equalTo(Arrays.asList(TestUnstableBranch.V_5_3_2, TestUnstableBranch.V_5_4_0, TestUnstableBranch.V_6_0_0)));
- }
-
- public static class TestNewMajorRelease {
- public static final Version V_5_6_0 = Version.fromString("5.6.0");
- public static final Version V_5_6_1 = Version.fromString("5.6.1");
- public static final Version V_5_6_2 = Version.fromString("5.6.2");
- public static final Version V_6_0_0 = Version.fromString("6.0.0");
- public static final Version V_6_0_1 = Version.fromString("6.0.1");
- public static final Version CURRENT = V_6_0_1;
- }
-
- public void testResolveReleasedVersionsAtNewMajorRelease() {
- Tuple, List> t = VersionUtils.resolveReleasedVersions(
- TestNewMajorRelease.CURRENT,
- TestNewMajorRelease.class
- );
- List released = t.v1();
- List unreleased = t.v2();
-
- assertThat(released, equalTo(Arrays.asList(TestNewMajorRelease.V_5_6_0, TestNewMajorRelease.V_5_6_1, TestNewMajorRelease.V_6_0_0)));
- assertThat(unreleased, equalTo(Arrays.asList(TestNewMajorRelease.V_5_6_2, TestNewMajorRelease.V_6_0_1)));
- }
-
- public static class TestVersionBumpIn6x {
- public static final Version V_5_6_0 = Version.fromString("5.6.0");
- public static final Version V_5_6_1 = Version.fromString("5.6.1");
- public static final Version V_5_6_2 = Version.fromString("5.6.2");
- public static final Version V_6_0_0 = Version.fromString("6.0.0");
- public static final Version V_6_0_1 = Version.fromString("6.0.1");
- public static final Version V_6_1_0 = Version.fromString("6.1.0");
- public static final Version CURRENT = V_6_1_0;
- }
-
- public void testResolveReleasedVersionsAtVersionBumpIn6x() {
- Tuple, List> t = VersionUtils.resolveReleasedVersions(
- TestVersionBumpIn6x.CURRENT,
- TestVersionBumpIn6x.class
- );
- List released = t.v1();
- List unreleased = t.v2();
-
- assertThat(released, equalTo(Arrays.asList(TestVersionBumpIn6x.V_5_6_0, TestVersionBumpIn6x.V_5_6_1, TestVersionBumpIn6x.V_6_0_0)));
- assertThat(
- unreleased,
- equalTo(Arrays.asList(TestVersionBumpIn6x.V_5_6_2, TestVersionBumpIn6x.V_6_0_1, TestVersionBumpIn6x.V_6_1_0))
- );
- }
-
- public static class TestNewMinorBranchIn6x {
- public static final Version V_5_6_0 = Version.fromString("5.6.0");
- public static final Version V_5_6_1 = Version.fromString("5.6.1");
- public static final Version V_5_6_2 = Version.fromString("5.6.2");
- public static final Version V_6_0_0 = Version.fromString("6.0.0");
- public static final Version V_6_0_1 = Version.fromString("6.0.1");
- public static final Version V_6_1_0 = Version.fromString("6.1.0");
- public static final Version V_6_1_1 = Version.fromString("6.1.1");
- public static final Version V_6_1_2 = Version.fromString("6.1.2");
- public static final Version V_6_2_0 = Version.fromString("6.2.0");
- public static final Version CURRENT = V_6_2_0;
- }
-
- public void testResolveReleasedVersionsAtNewMinorBranchIn6x() {
- Tuple