From b3459fdf0f07b3dfe1d8375fbc1db50840f7a8fe Mon Sep 17 00:00:00 2001
From: Liyun Xiu
Date: Sat, 12 Oct 2024 02:43:36 +0800
Subject: [PATCH 01/19] Update setting API honors cluster's replica setting as default #14810 (#14948)

* Update setting API honors cluster's replica setting as default #14810

Signed-off-by: Liyun Xiu

* Update changelog & add one more test case

Signed-off-by: Liyun Xiu

---------

Signed-off-by: Liyun Xiu
Signed-off-by: Daniel Widdis
Co-authored-by: Daniel Widdis
---
 CHANGELOG.md                                  |  1 +
 .../indices/settings/UpdateSettingsIT.java    | 63 +++++++++++++++++++
 .../MetadataUpdateSettingsService.java        | 18 +++---
 3 files changed, 74 insertions(+), 8 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index f5622a177ee6c..2a32d2503bbef 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -67,6 +67,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Fix search_as_you_type not supporting multi-fields ([#15988](https://github.com/opensearch-project/OpenSearch/pull/15988))
 - Avoid infinite loop when `flat_object` field contains invalid token ([#15985](https://github.com/opensearch-project/OpenSearch/pull/15985))
 - Fix infinite loop in nested agg ([#15931](https://github.com/opensearch-project/OpenSearch/pull/15931))
+- Fix update settings with null replica not honoring cluster setting bug ([#14948](https://github.com/opensearch-project/OpenSearch/pull/14948))
 - Fix race condition in node-join and node-left ([#15521](https://github.com/opensearch-project/OpenSearch/pull/15521))
 - Streaming bulk request hangs ([#16158](https://github.com/opensearch-project/OpenSearch/pull/16158))
 - Fix warnings from SLF4J on startup when repository-s3 is installed ([#16194](https://github.com/opensearch-project/OpenSearch/pull/16194))

diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/settings/UpdateSettingsIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/settings/UpdateSettingsIT.java
index 475d0a154a98b..5e90ee3185618 100644
--- a/server/src/internalClusterTest/java/org/opensearch/indices/settings/UpdateSettingsIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/indices/settings/UpdateSettingsIT.java
@@ -895,6 +895,69 @@ private void runTestDefaultNumberOfReplicasTest(final boolean closeIndex) {
         assertThat(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.get(response.getIndexToSettings().get("test")), equalTo(1));
     }

+    public void testNullReplicaUpdate() {
+        internalCluster().ensureAtLeastNumDataNodes(2);
+
+        // set the cluster-level default replica count
+        String defaultNumberOfReplica = "3";
+        assertAcked(
+            client().admin()
+                .cluster()
+                .prepareUpdateSettings()
+                .setPersistentSettings(Settings.builder().put("cluster.default_number_of_replicas", defaultNumberOfReplica))
+                .get()
+        );
+        // creating the index with an explicit number of replicas ignores the cluster default
+        assertAcked(
+            client().admin()
+                .indices()
+                .prepareCreate("test")
+                .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "2"))
+        );
+
+        String numberOfReplicas = client().admin()
+            .indices()
+            .prepareGetSettings("test")
+            .get()
+            .getSetting("test", IndexMetadata.SETTING_NUMBER_OF_REPLICAS);
+        assertEquals("2", numberOfReplicas);
+        // updating the setting to null falls back to the cluster-level default replica count
+        assertAcked(
+            client().admin()
+                .indices()
+                .prepareUpdateSettings("test")
+                .setSettings(Settings.builder().putNull(IndexMetadata.SETTING_NUMBER_OF_REPLICAS))
+        );
+
+        numberOfReplicas = client().admin()
+            .indices()
+            .prepareGetSettings("test")
+            .get()
+            .getSetting("test", IndexMetadata.SETTING_NUMBER_OF_REPLICAS);
+        assertEquals(defaultNumberOfReplica, numberOfReplicas);
+
+        // Clean up the cluster setting; updating with a null replica count should then fall back to the default of 1
+        assertAcked(
+            client().admin()
+                .cluster()
+                .prepareUpdateSettings()
+                .setPersistentSettings(Settings.builder().putNull("cluster.default_number_of_replicas"))
+        );
+        assertAcked(
+            client().admin()
+                .indices()
+                .prepareUpdateSettings("test")
+                .setSettings(Settings.builder().putNull(IndexMetadata.SETTING_NUMBER_OF_REPLICAS))
+        );
+
+        numberOfReplicas = client().admin()
+            .indices()
+            .prepareGetSettings("test")
+            .get()
+            .getSetting("test", IndexMetadata.SETTING_NUMBER_OF_REPLICAS);
+        assertEquals("1", numberOfReplicas);
+    }
+
     public void testNoopUpdate() {
         internalCluster().ensureAtLeastNumDataNodes(2);
         final ClusterService clusterService = internalCluster().getClusterManagerNodeInstance(ClusterService.class);

diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java
index 4e7e31bbb9222..8c350d6b9cef5 100644
--- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java
+++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java
@@ -140,6 +140,7 @@ public void updateSettings(
         validateRefreshIntervalSettings(normalizedSettings, clusterService.getClusterSettings());
         validateTranslogDurabilitySettings(normalizedSettings, clusterService.getClusterSettings(), clusterService.getSettings());
+        final int defaultReplicaCount = clusterService.getClusterSettings().get(Metadata.DEFAULT_REPLICA_COUNT_SETTING);
 
         Settings.Builder settingsForClosedIndices = Settings.builder();
         Settings.Builder settingsForOpenIndices = Settings.builder();
@@ -248,7 +249,10 @@ public ClusterState execute(ClusterState currentState) {
                 }
 
                 if (IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.exists(openSettings)) {
-                    final int updatedNumberOfReplicas = IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.get(openSettings);
+                    final int updatedNumberOfReplicas = openSettings.getAsInt(
+                        IndexMetadata.SETTING_NUMBER_OF_REPLICAS,
+                        defaultReplicaCount
+                    );
                     if (preserveExisting == false) {
                         for (Index index : request.indices()) {
                             if (index.getName().charAt(0) != '.') {
@@ -329,15 +333,13 @@ public ClusterState execute(ClusterState currentState) {
                     /*
                      * The setting index.number_of_replicas is special; we require that this setting has a value in the index. When
                      * creating the index, we ensure this by explicitly providing a value for the setting to the default (one) if
-                     * there is a not value provided on the source of the index creation. A user can update this setting though,
-                     * including updating it to null, indicating that they want to use the default value. In this case, we again
-                     * have to provide an explicit value for the setting to the default (one).
+                     * there is no value provided on the source of the index creation or "cluster.default_number_of_replicas" is
+                     * not set. A user can update this setting though, including updating it to null, indicating that they want to
+                     * use the value configured by cluster settings or a default value of 1. In this case, we again have to provide
+                     * an explicit value for the setting.
*/ if (IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.exists(indexSettings) == false) { - indexSettings.put( - IndexMetadata.SETTING_NUMBER_OF_REPLICAS, - IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.get(Settings.EMPTY) - ); + indexSettings.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, defaultReplicaCount); } Settings finalSettings = indexSettings.build(); indexScopedSettings.validate( From 53c9ddf77c5d050d188fd342035f932600a41608 Mon Sep 17 00:00:00 2001 From: Michael Froh Date: Fri, 11 Oct 2024 15:25:34 -0700 Subject: [PATCH 02/19] Remove ApproximateIndexOrDocValuesQuery (#16273) Looking into the query approximation framework, I realized that we don't actually need ApproximateIndexOrDocValuesQuery. We already have ApproximateScoreQuery that is able to choose between the approximate query and the original query. Assuming the range query is over an indexed field, it is (potentially) possible to approximate. If there are doc values, then the original query will be an IndexOrDocValuesQuery. Otherwise, it will just be a PointRangeQuery. Either way, we can wrap the original query into a generic ApproximateScoreQuery along with the approximate query. The approximation logic doesn't need to be specialized to the IndexOrDocValue case. --------- Signed-off-by: Michael Froh --- CHANGELOG.md | 1 + .../index/mapper/DateFieldMapper.java | 59 ++++----- .../bucket/filterrewrite/Helper.java | 4 +- .../ApproximateIndexOrDocValuesQuery.java | 62 ---------- .../ApproximatePointRangeQuery.java | 3 - .../approximate/ApproximateScoreQuery.java | 4 +- .../index/mapper/DateFieldTypeTests.java | 40 ++++--- ...angeFieldQueryStringQueryBuilderTests.java | 18 +-- .../index/mapper/RangeFieldTypeTests.java | 4 +- .../query/MatchPhraseQueryBuilderTests.java | 4 +- .../query/QueryStringQueryBuilderTests.java | 17 +-- .../index/query/RangeQueryBuilderTests.java | 62 +++++----- ...ApproximateIndexOrDocValuesQueryTests.java | 113 ------------------ 13 files changed, 119 insertions(+), 272 deletions(-) delete mode 100644 server/src/main/java/org/opensearch/search/approximate/ApproximateIndexOrDocValuesQuery.java delete mode 100644 server/src/test/java/org/opensearch/search/approximate/ApproximateIndexOrDocValuesQueryTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 2a32d2503bbef..43993cd7ca9dd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -54,6 +54,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Remove identity-related feature flagged code from the RestController ([#15430](https://github.com/opensearch-project/OpenSearch/pull/15430)) - Remove Identity FeatureFlag ([#16024](https://github.com/opensearch-project/OpenSearch/pull/16024)) - Ensure RestHandler.Wrapper delegates all implementations to the wrapped handler ([#16154](https://github.com/opensearch-project/OpenSearch/pull/16154)) +- Code cleanup: Remove ApproximateIndexOrDocValuesQuery ([#16273](https://github.com/opensearch-project/OpenSearch/pull/16273)) ### Deprecated diff --git a/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java index 4698440f08036..7fbb38c47572c 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java @@ -62,8 +62,8 @@ import org.opensearch.index.query.QueryRewriteContext; import org.opensearch.index.query.QueryShardContext; import org.opensearch.search.DocValueFormat; -import 
org.opensearch.search.approximate.ApproximateIndexOrDocValuesQuery; import org.opensearch.search.approximate.ApproximatePointRangeQuery; +import org.opensearch.search.approximate.ApproximateScoreQuery; import org.opensearch.search.lookup.SearchLookup; import java.io.IOException; @@ -113,21 +113,6 @@ public static DateFormatter getDefaultDateTimeFormatter() { : LEGACY_DEFAULT_DATE_TIME_FORMATTER; } - public static Query getDefaultQuery(Query pointRangeQuery, Query dvQuery, String name, long l, long u) { - return FeatureFlags.isEnabled(FeatureFlags.APPROXIMATE_POINT_RANGE_QUERY_SETTING) - ? new ApproximateIndexOrDocValuesQuery( - pointRangeQuery, - new ApproximatePointRangeQuery(name, pack(new long[] { l }).bytes, pack(new long[] { u }).bytes, new long[] { l }.length) { - @Override - protected String toString(int dimension, byte[] value) { - return Long.toString(LongPoint.decodeDimension(value, 0)); - } - }, - dvQuery - ) - : new IndexOrDocValuesQuery(pointRangeQuery, dvQuery); - } - /** * Resolution of the date time * @@ -488,22 +473,42 @@ public Query rangeQuery( } DateMathParser parser = forcedDateParser == null ? dateMathParser : forcedDateParser; return dateRangeQuery(lowerTerm, upperTerm, includeLower, includeUpper, timeZone, parser, context, resolution, (l, u) -> { - Query pointRangeQuery = isSearchable() ? LongPoint.newRangeQuery(name(), l, u) : null; Query dvQuery = hasDocValues() ? SortedNumericDocValuesField.newSlowRangeQuery(name(), l, u) : null; - if (isSearchable() && hasDocValues()) { - Query query = getDefaultQuery(pointRangeQuery, dvQuery, name(), l, u); - if (context.indexSortedOnField(name())) { - query = new IndexSortSortedNumericDocValuesRangeQuery(name(), l, u, query); + if (isSearchable()) { + Query pointRangeQuery = LongPoint.newRangeQuery(name(), l, u); + Query query; + if (dvQuery != null) { + query = new IndexOrDocValuesQuery(pointRangeQuery, dvQuery); + if (context.indexSortedOnField(name())) { + query = new IndexSortSortedNumericDocValuesRangeQuery(name(), l, u, query); + } + } else { + query = pointRangeQuery; + } + if (FeatureFlags.isEnabled(FeatureFlags.APPROXIMATE_POINT_RANGE_QUERY_SETTING)) { + return new ApproximateScoreQuery( + query, + new ApproximatePointRangeQuery( + name(), + pack(new long[] { l }).bytes, + pack(new long[] { u }).bytes, + new long[] { l }.length + ) { + @Override + protected String toString(int dimension, byte[] value) { + return Long.toString(LongPoint.decodeDimension(value, 0)); + } + } + ); } return query; } - if (hasDocValues()) { - if (context.indexSortedOnField(name())) { - dvQuery = new IndexSortSortedNumericDocValuesRangeQuery(name(), l, u, dvQuery); - } - return dvQuery; + + // Not searchable. Must have doc values. 
+ if (context.indexSortedOnField(name())) { + dvQuery = new IndexSortSortedNumericDocValuesRangeQuery(name(), l, u, dvQuery); } - return pointRangeQuery; + return dvQuery; }); } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/Helper.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/Helper.java index 17da7e5712be8..356380a687003 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/Helper.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/Helper.java @@ -23,7 +23,7 @@ import org.opensearch.common.lucene.search.function.FunctionScoreQuery; import org.opensearch.index.mapper.DateFieldMapper; import org.opensearch.index.query.DateRangeIncludingNowQuery; -import org.opensearch.search.approximate.ApproximateIndexOrDocValuesQuery; +import org.opensearch.search.approximate.ApproximateScoreQuery; import org.opensearch.search.internal.SearchContext; import java.io.IOException; @@ -55,7 +55,7 @@ private Helper() {} queryWrappers.put(FunctionScoreQuery.class, q -> ((FunctionScoreQuery) q).getSubQuery()); queryWrappers.put(DateRangeIncludingNowQuery.class, q -> ((DateRangeIncludingNowQuery) q).getQuery()); queryWrappers.put(IndexOrDocValuesQuery.class, q -> ((IndexOrDocValuesQuery) q).getIndexQuery()); - queryWrappers.put(ApproximateIndexOrDocValuesQuery.class, q -> ((ApproximateIndexOrDocValuesQuery) q).getOriginalQuery()); + queryWrappers.put(ApproximateScoreQuery.class, q -> ((ApproximateScoreQuery) q).getOriginalQuery()); } /** diff --git a/server/src/main/java/org/opensearch/search/approximate/ApproximateIndexOrDocValuesQuery.java b/server/src/main/java/org/opensearch/search/approximate/ApproximateIndexOrDocValuesQuery.java deleted file mode 100644 index b99e0a0cbf808..0000000000000 --- a/server/src/main/java/org/opensearch/search/approximate/ApproximateIndexOrDocValuesQuery.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.search.approximate; - -import org.apache.lucene.search.IndexOrDocValuesQuery; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.QueryVisitor; - -/** - * A wrapper around {@link IndexOrDocValuesQuery} that can be used to run approximate queries. - * It delegates to either {@link ApproximateQuery} or {@link IndexOrDocValuesQuery} based on whether the query can be approximated or not. 
- * @see ApproximateQuery
- */
-public final class ApproximateIndexOrDocValuesQuery extends ApproximateScoreQuery {
-
-    private final ApproximateQuery approximateIndexQuery;
-    private final IndexOrDocValuesQuery indexOrDocValuesQuery;
-
-    public ApproximateIndexOrDocValuesQuery(Query indexQuery, ApproximateQuery approximateIndexQuery, Query dvQuery) {
-        super(new IndexOrDocValuesQuery(indexQuery, dvQuery), approximateIndexQuery);
-        this.approximateIndexQuery = approximateIndexQuery;
-        this.indexOrDocValuesQuery = new IndexOrDocValuesQuery(indexQuery, dvQuery);
-    }
-
-    @Override
-    public String toString(String field) {
-        return "ApproximateIndexOrDocValuesQuery(indexQuery="
-            + indexOrDocValuesQuery.getIndexQuery().toString(field)
-            + ", approximateIndexQuery="
-            + approximateIndexQuery.toString(field)
-            + ", dvQuery="
-            + indexOrDocValuesQuery.getRandomAccessQuery().toString(field)
-            + ")";
-    }
-
-    @Override
-    public void visit(QueryVisitor visitor) {
-        indexOrDocValuesQuery.visit(visitor);
-    }
-
-    @Override
-    public boolean equals(Object obj) {
-        if (sameClassAs(obj) == false) {
-            return false;
-        }
-        return true;
-    }
-
-    @Override
-    public int hashCode() {
-        int h = classHash();
-        h = 31 * h + indexOrDocValuesQuery.getIndexQuery().hashCode();
-        h = 31 * h + indexOrDocValuesQuery.getRandomAccessQuery().hashCode();
-        return h;
-    }
-}
diff --git a/server/src/main/java/org/opensearch/search/approximate/ApproximatePointRangeQuery.java b/server/src/main/java/org/opensearch/search/approximate/ApproximatePointRangeQuery.java
index 8076da6ab970b..6ff01f5f39d36 100644
--- a/server/src/main/java/org/opensearch/search/approximate/ApproximatePointRangeQuery.java
+++ b/server/src/main/java/org/opensearch/search/approximate/ApproximatePointRangeQuery.java
@@ -433,9 +433,6 @@ public boolean canApproximate(SearchContext context) {
         if (context.aggregations() != null) {
             return false;
         }
-        if (!(context.query() instanceof ApproximateIndexOrDocValuesQuery)) {
-            return false;
-        }
         // size 0 could be set for caching
         if (context.from() + context.size() == 0) {
             this.setSize(10_000);
diff --git a/server/src/main/java/org/opensearch/search/approximate/ApproximateScoreQuery.java b/server/src/main/java/org/opensearch/search/approximate/ApproximateScoreQuery.java
index d1dd32b239f28..2395142c606ae 100644
--- a/server/src/main/java/org/opensearch/search/approximate/ApproximateScoreQuery.java
+++ b/server/src/main/java/org/opensearch/search/approximate/ApproximateScoreQuery.java
@@ -21,12 +21,12 @@
 * Entry-point for the approximation framework.
 * This class is heavily inspired by {@link org.apache.lucene.search.IndexOrDocValuesQuery}. It acts as a wrapper that consumes two queries, a regular query and an approximate version of the same. By default, it executes the regular query and returns {@link Weight#scorer} for the original query. At run-time, depending on certain constraints, we can re-write the {@code Weight} to use the approximate weight instead.
*/ -public class ApproximateScoreQuery extends Query { +public final class ApproximateScoreQuery extends Query { private final Query originalQuery; private final ApproximateQuery approximationQuery; - protected Query resolvedQuery; + Query resolvedQuery; public ApproximateScoreQuery(Query originalQuery, ApproximateQuery approximationQuery) { this.originalQuery = originalQuery; diff --git a/server/src/test/java/org/opensearch/index/mapper/DateFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/DateFieldTypeTests.java index 259904fc143a1..15b16f4610062 100644 --- a/server/src/test/java/org/opensearch/index/mapper/DateFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/DateFieldTypeTests.java @@ -41,6 +41,7 @@ import org.apache.lucene.index.MultiReader; import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.IndexOrDocValuesQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.IndexSortSortedNumericDocValuesRangeQuery; import org.apache.lucene.search.Query; @@ -65,8 +66,8 @@ import org.opensearch.index.query.DateRangeIncludingNowQuery; import org.opensearch.index.query.QueryRewriteContext; import org.opensearch.index.query.QueryShardContext; -import org.opensearch.search.approximate.ApproximateIndexOrDocValuesQuery; import org.opensearch.search.approximate.ApproximatePointRangeQuery; +import org.opensearch.search.approximate.ApproximateScoreQuery; import org.joda.time.DateTimeZone; import java.io.IOException; @@ -212,8 +213,11 @@ public void testTermQuery() { MappedFieldType ft = new DateFieldType("field"); String date = "2015-10-12T14:10:55"; long instant = DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(date)).toInstant().toEpochMilli(); - Query expected = new ApproximateIndexOrDocValuesQuery( - LongPoint.newRangeQuery("field", instant, instant + 999), + Query expected = new ApproximateScoreQuery( + new IndexOrDocValuesQuery( + LongPoint.newRangeQuery("field", instant, instant + 999), + SortedNumericDocValuesField.newSlowRangeQuery("field", instant, instant + 999) + ), new ApproximatePointRangeQuery( "field", pack(new long[] { instant }).bytes, @@ -224,8 +228,7 @@ public void testTermQuery() { protected String toString(int dimension, byte[] value) { return Long.toString(LongPoint.decodeDimension(value, 0)); } - }, - SortedNumericDocValuesField.newSlowRangeQuery("field", instant, instant + 999) + } ); assumeThat( "Using Approximate Range Query as default", @@ -278,8 +281,11 @@ public void testRangeQuery() throws IOException { String date2 = "2016-04-28T11:33:52"; long instant1 = DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(date1)).toInstant().toEpochMilli(); long instant2 = DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(date2)).toInstant().toEpochMilli() + 999; - Query expected = new ApproximateIndexOrDocValuesQuery( - LongPoint.newRangeQuery("field", instant1, instant2), + Query expected = new ApproximateScoreQuery( + new IndexOrDocValuesQuery( + LongPoint.newRangeQuery("field", instant1, instant2), + SortedNumericDocValuesField.newSlowRangeQuery("field", instant1, instant2) + ), new ApproximatePointRangeQuery( "field", pack(new long[] { instant1 }).bytes, @@ -290,8 +296,7 @@ public void testRangeQuery() throws IOException { protected String toString(int dimension, byte[] value) { return Long.toString(LongPoint.decodeDimension(value, 0)); } - }, - 
SortedNumericDocValuesField.newSlowRangeQuery("field", instant1, instant2) + } ); assumeThat( "Using Approximate Range Query as default", @@ -306,8 +311,11 @@ protected String toString(int dimension, byte[] value) { instant1 = nowInMillis; instant2 = instant1 + 100; expected = new DateRangeIncludingNowQuery( - new ApproximateIndexOrDocValuesQuery( - LongPoint.newRangeQuery("field", instant1, instant2), + new ApproximateScoreQuery( + new IndexOrDocValuesQuery( + LongPoint.newRangeQuery("field", instant1, instant2), + SortedNumericDocValuesField.newSlowRangeQuery("field", instant1, instant2) + ), new ApproximatePointRangeQuery( "field", pack(new long[] { instant1 }).bytes, @@ -318,8 +326,7 @@ protected String toString(int dimension, byte[] value) { protected String toString(int dimension, byte[] value) { return Long.toString(LongPoint.decodeDimension(value, 0)); } - }, - SortedNumericDocValuesField.newSlowRangeQuery("field", instant1, instant2) + } ) ); assumeThat( @@ -388,8 +395,8 @@ public void testRangeQueryWithIndexSort() { "field", instant1, instant2, - new ApproximateIndexOrDocValuesQuery( - LongPoint.newRangeQuery("field", instant1, instant2), + new ApproximateScoreQuery( + new IndexOrDocValuesQuery(LongPoint.newRangeQuery("field", instant1, instant2), dvQuery), new ApproximatePointRangeQuery( "field", pack(new long[] { instant1 }).bytes, @@ -400,8 +407,7 @@ public void testRangeQueryWithIndexSort() { protected String toString(int dimension, byte[] value) { return Long.toString(LongPoint.decodeDimension(value, 0)); } - }, - dvQuery + } ) ); assumeThat( diff --git a/server/src/test/java/org/opensearch/index/mapper/RangeFieldQueryStringQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/mapper/RangeFieldQueryStringQueryBuilderTests.java index 7a8ac829bdd97..a11a16f421783 100644 --- a/server/src/test/java/org/opensearch/index/mapper/RangeFieldQueryStringQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/RangeFieldQueryStringQueryBuilderTests.java @@ -50,8 +50,8 @@ import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.query.QueryShardContext; import org.opensearch.index.query.QueryStringQueryBuilder; -import org.opensearch.search.approximate.ApproximateIndexOrDocValuesQuery; import org.opensearch.search.approximate.ApproximatePointRangeQuery; +import org.opensearch.search.approximate.ApproximateScoreQuery; import org.opensearch.test.AbstractQueryTestCase; import java.io.IOException; @@ -191,11 +191,14 @@ public void testDateRangeQuery() throws Exception { is(true) ); assertEquals( - new ApproximateIndexOrDocValuesQuery( - LongPoint.newRangeQuery( - DATE_FIELD_NAME, - parser.parse(lowerBoundExact, () -> 0).toEpochMilli(), - parser.parse(upperBoundExact, () -> 0).toEpochMilli() + new ApproximateScoreQuery( + new IndexOrDocValuesQuery( + LongPoint.newRangeQuery( + DATE_FIELD_NAME, + parser.parse(lowerBoundExact, () -> 0).toEpochMilli(), + parser.parse(upperBoundExact, () -> 0).toEpochMilli() + ), + controlDv ), new ApproximatePointRangeQuery( DATE_FIELD_NAME, @@ -207,8 +210,7 @@ public void testDateRangeQuery() throws Exception { protected String toString(int dimension, byte[] value) { return Long.toString(LongPoint.decodeDimension(value, 0)); } - }, - controlDv + } ), queryOnDateField ); diff --git a/server/src/test/java/org/opensearch/index/mapper/RangeFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/RangeFieldTypeTests.java index b157c43e45451..04d4e6b3f852e 100644 --- 
a/server/src/test/java/org/opensearch/index/mapper/RangeFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/RangeFieldTypeTests.java @@ -57,7 +57,7 @@ import org.opensearch.index.mapper.RangeFieldMapper.RangeFieldType; import org.opensearch.index.query.QueryShardContext; import org.opensearch.index.query.QueryShardException; -import org.opensearch.search.approximate.ApproximateIndexOrDocValuesQuery; +import org.opensearch.search.approximate.ApproximateScoreQuery; import org.opensearch.test.IndexSettingsModule; import org.joda.time.DateTime; import org.junit.Before; @@ -293,7 +293,7 @@ public void testDateRangeQueryUsingMappingFormatLegacy() { ); assertEquals( "field:[1465975790000 TO 1466062190999]", - ((IndexOrDocValuesQuery) ((ApproximateIndexOrDocValuesQuery) queryOnDateField).getOriginalQuery()).getIndexQuery().toString() + ((IndexOrDocValuesQuery) ((ApproximateScoreQuery) queryOnDateField).getOriginalQuery()).getIndexQuery().toString() ); } diff --git a/server/src/test/java/org/opensearch/index/query/MatchPhraseQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/MatchPhraseQueryBuilderTests.java index ddf58073a5206..48f697cff1bea 100644 --- a/server/src/test/java/org/opensearch/index/query/MatchPhraseQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/MatchPhraseQueryBuilderTests.java @@ -42,7 +42,7 @@ import org.apache.lucene.search.TermQuery; import org.opensearch.core.common.ParsingException; import org.opensearch.index.search.MatchQuery.ZeroTermsQuery; -import org.opensearch.search.approximate.ApproximateIndexOrDocValuesQuery; +import org.opensearch.search.approximate.ApproximateScoreQuery; import org.opensearch.test.AbstractQueryTestCase; import java.io.IOException; @@ -131,7 +131,7 @@ protected void doAssertLuceneQuery(MatchPhraseQueryBuilder queryBuilder, Query q .or(instanceOf(PointRangeQuery.class)) .or(instanceOf(IndexOrDocValuesQuery.class)) .or(instanceOf(MatchNoDocsQuery.class)) - .or(instanceOf(ApproximateIndexOrDocValuesQuery.class)) + .or(instanceOf(ApproximateScoreQuery.class)) ); } diff --git a/server/src/test/java/org/opensearch/index/query/QueryStringQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/QueryStringQueryBuilderTests.java index 5b030df20e889..6f295100d9e47 100644 --- a/server/src/test/java/org/opensearch/index/query/QueryStringQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/QueryStringQueryBuilderTests.java @@ -47,6 +47,7 @@ import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.DisjunctionMaxQuery; import org.apache.lucene.search.FuzzyQuery; +import org.apache.lucene.search.IndexOrDocValuesQuery; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.MultiTermQuery; @@ -76,8 +77,8 @@ import org.opensearch.index.mapper.FieldNamesFieldMapper; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.search.QueryStringQueryParser; -import org.opensearch.search.approximate.ApproximateIndexOrDocValuesQuery; import org.opensearch.search.approximate.ApproximatePointRangeQuery; +import org.opensearch.search.approximate.ApproximateScoreQuery; import org.opensearch.test.AbstractQueryTestCase; import org.hamcrest.CoreMatchers; import org.hamcrest.Matchers; @@ -863,7 +864,7 @@ public void testToQueryDateWithTimeZone() throws Exception { FeatureFlags.isEnabled(FeatureFlags.APPROXIMATE_POINT_RANGE_QUERY), is(true) ); - 
assertThat(query, instanceOf(ApproximateIndexOrDocValuesQuery.class)); + assertThat(query, instanceOf(ApproximateScoreQuery.class)); long lower = 0; // 1970-01-01T00:00:00.999 UTC long upper = 86399999; // 1970-01-01T23:59:59.999 UTC assertEquals(calculateExpectedDateQuery(lower, upper), query); @@ -872,9 +873,12 @@ public void testToQueryDateWithTimeZone() throws Exception { assertEquals(calculateExpectedDateQuery(lower + msPerHour, upper + msPerHour), qsq.timeZone("-01:00").toQuery(context)); } - private ApproximateIndexOrDocValuesQuery calculateExpectedDateQuery(long lower, long upper) { - return new ApproximateIndexOrDocValuesQuery( - LongPoint.newRangeQuery(DATE_FIELD_NAME, lower, upper), + private ApproximateScoreQuery calculateExpectedDateQuery(long lower, long upper) { + return new ApproximateScoreQuery( + new IndexOrDocValuesQuery( + LongPoint.newRangeQuery(DATE_FIELD_NAME, lower, upper), + SortedNumericDocValuesField.newSlowRangeQuery(DATE_FIELD_NAME, lower, upper) + ), new ApproximatePointRangeQuery( DATE_FIELD_NAME, pack(new long[] { lower }).bytes, @@ -885,8 +889,7 @@ private ApproximateIndexOrDocValuesQuery calculateExpectedDateQuery(long lower, protected String toString(int dimension, byte[] value) { return Long.toString(LongPoint.decodeDimension(value, 0)); } - }, - SortedNumericDocValuesField.newSlowRangeQuery(DATE_FIELD_NAME, lower, upper) + } ); } diff --git a/server/src/test/java/org/opensearch/index/query/RangeQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/RangeQueryBuilderTests.java index 799d7c7b63462..79f1452db297b 100644 --- a/server/src/test/java/org/opensearch/index/query/RangeQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/RangeQueryBuilderTests.java @@ -55,9 +55,9 @@ import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.MappedFieldType.Relation; import org.opensearch.index.mapper.MapperService; -import org.opensearch.search.approximate.ApproximateIndexOrDocValuesQuery; import org.opensearch.search.approximate.ApproximatePointRangeQuery; import org.opensearch.search.approximate.ApproximateQuery; +import org.opensearch.search.approximate.ApproximateScoreQuery; import org.opensearch.test.AbstractQueryTestCase; import org.joda.time.DateTime; import org.joda.time.chrono.ISOChronology; @@ -196,10 +196,10 @@ protected void doAssertLuceneQuery(RangeQueryBuilder queryBuilder, Query query, FeatureFlags.isEnabled(FeatureFlags.APPROXIMATE_POINT_RANGE_QUERY), is(true) ); - assertThat(query, instanceOf(ApproximateIndexOrDocValuesQuery.class)); - Query approximationQuery = ((ApproximateIndexOrDocValuesQuery) query).getApproximationQuery(); + assertThat(query, instanceOf(ApproximateScoreQuery.class)); + Query approximationQuery = ((ApproximateScoreQuery) query).getApproximationQuery(); assertThat(approximationQuery, instanceOf(ApproximateQuery.class)); - Query originalQuery = ((ApproximateIndexOrDocValuesQuery) query).getOriginalQuery(); + Query originalQuery = ((ApproximateScoreQuery) query).getOriginalQuery(); assertThat(originalQuery, instanceOf(IndexOrDocValuesQuery.class)); MapperService mapperService = context.getMapperService(); MappedFieldType mappedFieldType = mapperService.fieldType(expectedFieldName); @@ -250,8 +250,11 @@ protected void doAssertLuceneQuery(RangeQueryBuilder queryBuilder, Query query, } } assertEquals( - new ApproximateIndexOrDocValuesQuery( - LongPoint.newRangeQuery(DATE_FIELD_NAME, minLong, maxLong), + new ApproximateScoreQuery( + new IndexOrDocValuesQuery( + 
LongPoint.newRangeQuery(DATE_FIELD_NAME, minLong, maxLong), + SortedNumericDocValuesField.newSlowRangeQuery(DATE_FIELD_NAME, minLong, maxLong) + ), new ApproximatePointRangeQuery( DATE_FIELD_NAME, pack(new long[] { minLong }).bytes, @@ -262,8 +265,7 @@ protected void doAssertLuceneQuery(RangeQueryBuilder queryBuilder, Query query, protected String toString(int dimension, byte[] value) { return Long.toString(LongPoint.decodeDimension(value, 0)); } - }, - SortedNumericDocValuesField.newSlowRangeQuery(DATE_FIELD_NAME, minLong, maxLong) + } ), query ); @@ -336,16 +338,19 @@ public void testDateRangeQueryFormat() throws IOException { FeatureFlags.isEnabled(FeatureFlags.APPROXIMATE_POINT_RANGE_QUERY), is(true) ); - assertThat(parsedQuery, instanceOf(ApproximateIndexOrDocValuesQuery.class)); - Query approximationQuery = ((ApproximateIndexOrDocValuesQuery) parsedQuery).getApproximationQuery(); + assertThat(parsedQuery, instanceOf(ApproximateScoreQuery.class)); + Query approximationQuery = ((ApproximateScoreQuery) parsedQuery).getApproximationQuery(); assertThat(approximationQuery, instanceOf(ApproximateQuery.class)); - Query originalQuery = ((ApproximateIndexOrDocValuesQuery) parsedQuery).getOriginalQuery(); + Query originalQuery = ((ApproximateScoreQuery) parsedQuery).getOriginalQuery(); assertThat(originalQuery, instanceOf(IndexOrDocValuesQuery.class)); long lower = DateTime.parse("2012-01-01T00:00:00.000+00").getMillis(); long upper = DateTime.parse("2030-01-01T00:00:00.000+00").getMillis() - 1; assertEquals( - new ApproximateIndexOrDocValuesQuery( - LongPoint.newRangeQuery(DATE_FIELD_NAME, lower, upper), + new ApproximateScoreQuery( + new IndexOrDocValuesQuery( + LongPoint.newRangeQuery(DATE_FIELD_NAME, lower, upper), + SortedNumericDocValuesField.newSlowRangeQuery(DATE_FIELD_NAME, lower, upper) + ), new ApproximatePointRangeQuery( DATE_FIELD_NAME, pack(new long[] { lower }).bytes, @@ -356,8 +361,7 @@ public void testDateRangeQueryFormat() throws IOException { protected String toString(int dimension, byte[] value) { return Long.toString(LongPoint.decodeDimension(value, 0)); } - }, - SortedNumericDocValuesField.newSlowRangeQuery(DATE_FIELD_NAME, lower, upper) + } ), parsedQuery ); @@ -394,13 +398,16 @@ public void testDateRangeBoundaries() throws IOException { FeatureFlags.isEnabled(FeatureFlags.APPROXIMATE_POINT_RANGE_QUERY), is(true) ); - assertThat(parsedQuery, instanceOf(ApproximateIndexOrDocValuesQuery.class)); + assertThat(parsedQuery, instanceOf(ApproximateScoreQuery.class)); long lower = DateTime.parse("2014-11-01T00:00:00.000+00").getMillis(); long upper = DateTime.parse("2014-12-08T23:59:59.999+00").getMillis(); assertEquals( - new ApproximateIndexOrDocValuesQuery( - LongPoint.newRangeQuery(DATE_FIELD_NAME, lower, upper), + new ApproximateScoreQuery( + new IndexOrDocValuesQuery( + LongPoint.newRangeQuery(DATE_FIELD_NAME, lower, upper), + SortedNumericDocValuesField.newSlowRangeQuery(DATE_FIELD_NAME, lower, upper) + ), new ApproximatePointRangeQuery( DATE_FIELD_NAME, pack(new long[] { lower }).bytes, @@ -411,8 +418,7 @@ public void testDateRangeBoundaries() throws IOException { protected String toString(int dimension, byte[] value) { return Long.toString(LongPoint.decodeDimension(value, 0)); } - }, - SortedNumericDocValuesField.newSlowRangeQuery(DATE_FIELD_NAME, lower, upper) + } ) , @@ -430,12 +436,15 @@ protected String toString(int dimension, byte[] value) { + " }\n" + "}"; parsedQuery = parseQuery(query).toQuery(createShardContext()); - assertThat(parsedQuery, 
instanceOf(ApproximateIndexOrDocValuesQuery.class)); + assertThat(parsedQuery, instanceOf(ApproximateScoreQuery.class)); lower = DateTime.parse("2014-11-30T23:59:59.999+00").getMillis() + 1; upper = DateTime.parse("2014-12-08T00:00:00.000+00").getMillis() - 1; assertEquals( - new ApproximateIndexOrDocValuesQuery( - LongPoint.newRangeQuery(DATE_FIELD_NAME, lower, upper), + new ApproximateScoreQuery( + new IndexOrDocValuesQuery( + LongPoint.newRangeQuery(DATE_FIELD_NAME, lower, upper), + SortedNumericDocValuesField.newSlowRangeQuery(DATE_FIELD_NAME, lower, upper) + ), new ApproximatePointRangeQuery( DATE_FIELD_NAME, pack(new long[] { lower }).bytes, @@ -446,8 +455,7 @@ protected String toString(int dimension, byte[] value) { protected String toString(int dimension, byte[] value) { return Long.toString(LongPoint.decodeDimension(value, 0)); } - }, - SortedNumericDocValuesField.newSlowRangeQuery(DATE_FIELD_NAME, lower, upper) + } ) , @@ -476,8 +484,8 @@ public void testDateRangeQueryTimezone() throws IOException { FeatureFlags.isEnabled(FeatureFlags.APPROXIMATE_POINT_RANGE_QUERY), is(true) ); - assertThat(parsedQuery, instanceOf(ApproximateIndexOrDocValuesQuery.class)); - parsedQuery = ((ApproximateIndexOrDocValuesQuery) parsedQuery).getApproximationQuery(); + assertThat(parsedQuery, instanceOf(ApproximateScoreQuery.class)); + parsedQuery = ((ApproximateScoreQuery) parsedQuery).getApproximationQuery(); assertThat(parsedQuery, instanceOf(ApproximateQuery.class)); // TODO what else can we assert diff --git a/server/src/test/java/org/opensearch/search/approximate/ApproximateIndexOrDocValuesQueryTests.java b/server/src/test/java/org/opensearch/search/approximate/ApproximateIndexOrDocValuesQueryTests.java deleted file mode 100644 index 47f87c6abf629..0000000000000 --- a/server/src/test/java/org/opensearch/search/approximate/ApproximateIndexOrDocValuesQueryTests.java +++ /dev/null @@ -1,113 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.search.approximate; - -import org.apache.lucene.document.LongPoint; -import org.apache.lucene.document.SortedNumericDocValuesField; -import org.apache.lucene.index.DirectoryReader; -import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.search.ConstantScoreWeight; -import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.ScoreMode; -import org.apache.lucene.search.Weight; -import org.apache.lucene.store.Directory; -import org.opensearch.search.internal.SearchContext; -import org.opensearch.test.OpenSearchTestCase; -import org.junit.After; -import org.junit.Before; - -import java.io.IOException; - -import static org.apache.lucene.document.LongPoint.pack; - -public class ApproximateIndexOrDocValuesQueryTests extends OpenSearchTestCase { - private Directory dir; - private IndexWriter w; - private DirectoryReader reader; - private IndexSearcher searcher; - - @Before - public void initSearcher() throws IOException { - dir = newDirectory(); - w = new IndexWriter(dir, newIndexWriterConfig()); - } - - @After - public void closeAllTheReaders() throws IOException { - reader.close(); - w.close(); - dir.close(); - } - - public void testApproximateIndexOrDocValuesQueryWeight() throws IOException { - - long l = Long.MIN_VALUE; - long u = Long.MAX_VALUE; - Query indexQuery = LongPoint.newRangeQuery("test-index", l, u); - - ApproximateQuery approximateIndexQuery = new ApproximatePointRangeQuery( - "test-index", - pack(new long[] { l }).bytes, - pack(new long[] { u }).bytes, - new long[] { l }.length - ) { - protected String toString(int dimension, byte[] value) { - return Long.toString(LongPoint.decodeDimension(value, 0)); - } - }; - - Query dvQuery = SortedNumericDocValuesField.newSlowRangeQuery("test-index", l, u); - - ApproximateIndexOrDocValuesQuery approximateIndexOrDocValuesQuery = new ApproximateIndexOrDocValuesQuery( - indexQuery, - approximateIndexQuery, - dvQuery - ); - - reader = DirectoryReader.open(w); - searcher = newSearcher(reader); - - approximateIndexOrDocValuesQuery.resolvedQuery = indexQuery; - - Weight weight = approximateIndexOrDocValuesQuery.rewrite(searcher).createWeight(searcher, ScoreMode.COMPLETE, 1f); - - assertTrue(weight instanceof ConstantScoreWeight); - - ApproximateQuery approximateIndexQueryCanApproximate = new ApproximatePointRangeQuery( - "test-index", - pack(new long[] { l }).bytes, - pack(new long[] { u }).bytes, - new long[] { l }.length - ) { - protected String toString(int dimension, byte[] value) { - return Long.toString(LongPoint.decodeDimension(value, 0)); - } - - public boolean canApproximate(SearchContext context) { - return true; - } - }; - - ApproximateIndexOrDocValuesQuery approximateIndexOrDocValuesQueryCanApproximate = new ApproximateIndexOrDocValuesQuery( - indexQuery, - approximateIndexQueryCanApproximate, - dvQuery - ); - - approximateIndexOrDocValuesQueryCanApproximate.resolvedQuery = approximateIndexQueryCanApproximate; - - Weight approximateIndexOrDocValuesQueryCanApproximateWeight = approximateIndexOrDocValuesQueryCanApproximate.rewrite(searcher) - .createWeight(searcher, ScoreMode.COMPLETE, 1f); - - // we get ConstantScoreWeight since we're expecting to call ApproximatePointRangeQuery - assertTrue(approximateIndexOrDocValuesQueryCanApproximateWeight instanceof ConstantScoreWeight); - - } -} From 20536eef239031af1161fa341a57597e7219fcd5 Mon Sep 17 00:00:00 2001 From: Sagar <99425694+sgup432@users.noreply.github.com> Date: Fri, 11 Oct 2024 
18:35:50 -0700
Subject: [PATCH 03/19] Fix IndicesRequestCacheIT flaky tests (#16276)

Signed-off-by: Sagar Upadhyaya
---
 .../org/opensearch/indices/IndicesRequestCacheIT.java | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java
index 108ef14f0fcb4..557f9e19ee424 100644
--- a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java
@@ -84,6 +84,7 @@
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.List;
+import java.util.concurrent.TimeUnit;
 
 import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS;
 import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS;
@@ -586,6 +587,7 @@ public void testCacheWithFilteredAlias() throws InterruptedException {
             .put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true)
             .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
             .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
+            .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), TimeValue.timeValueMillis(-1))
             .build();
         String index = "index";
         assertAcked(
@@ -599,13 +601,14 @@
         );
         ZonedDateTime now = ZonedDateTime.now(ZoneOffset.UTC);
         client.prepareIndex(index).setId("1").setRouting("1").setSource("created_at", DateTimeFormatter.ISO_LOCAL_DATE.format(now)).get();
-        // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache
-        ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge(index).setFlush(true).get();
-        OpenSearchAssertions.assertAllSuccessful(forceMergeResponse);
         refreshAndWaitForReplication();
         indexRandomForConcurrentSearch(index);
 
+        // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache
+        ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge(index).setFlush(true).get();
+        OpenSearchAssertions.assertAllSuccessful(forceMergeResponse);
+
         assertCacheState(client, index, 0, 0);
 
         SearchResponse r1 = client.prepareSearch(index)
@@ -823,7 +826,7 @@ public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float bo
         SearchResponse resp = client.prepareSearch(index)
             .setRequestCache(true)
             .setQuery(timeoutQueryBuilder)
-            .setTimeout(TimeValue.ZERO)
+            .setTimeout(new TimeValue(5, TimeUnit.MILLISECONDS))
             .get();
         assertTrue(resp.isTimedOut());
         RequestCacheStats requestCacheStats = getRequestCacheStats(client, index);

From 5d2d3920f53113340df5b35c6bff6bfa9e9e7adb Mon Sep 17 00:00:00 2001
From: Rishab Nahata
Date: Mon, 14 Oct 2024 09:39:49 +0530
Subject: [PATCH 04/19] Skip unnecessary string format in RemoteStoreMigrationAllocationDecider when not in debug mode (#16299)

Signed-off-by: Rishab Nahata
---
 ...RemoteStoreMigrationAllocationDecider.java | 45 ++++++++++++++-----
 1 file changed, 35 insertions(+), 10 deletions(-)

diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/RemoteStoreMigrationAllocationDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/RemoteStoreMigrationAllocationDecider.java
index 67fe4ea1dcb1b..3da6288292181 100644
---
a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/RemoteStoreMigrationAllocationDecider.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/RemoteStoreMigrationAllocationDecider.java @@ -89,7 +89,7 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing return allocation.decision( Decision.YES, NAME, - getDecisionDetails(true, shardRouting, targetNode, " for strict compatibility mode") + getDecisionDetails(true, shardRouting, targetNode, " for strict compatibility mode", allocation) ); } @@ -103,11 +103,15 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing return allocation.decision( isNoDecision ? Decision.NO : Decision.YES, NAME, - getDecisionDetails(!isNoDecision, shardRouting, targetNode, reason) + getDecisionDetails(!isNoDecision, shardRouting, targetNode, reason, allocation) ); } else if (migrationDirection.equals(Direction.DOCREP)) { // docrep migration direction is currently not supported - return allocation.decision(Decision.YES, NAME, getDecisionDetails(true, shardRouting, targetNode, " for DOCREP direction")); + return allocation.decision( + Decision.YES, + NAME, + getDecisionDetails(true, shardRouting, targetNode, " for DOCREP direction", allocation) + ); } else { // check for remote store backed indices if (remoteSettingsBackedIndex && targetNode.isRemoteStoreNode() == false) { @@ -115,7 +119,7 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing String reason = new StringBuilder(" because a remote store backed index's shard copy can only be ").append( (shardRouting.assignedToNode() == false) ? "allocated" : "relocated" ).append(" to a remote node").toString(); - return allocation.decision(Decision.NO, NAME, getDecisionDetails(false, shardRouting, targetNode, reason)); + return allocation.decision(Decision.NO, NAME, getDecisionDetails(false, shardRouting, targetNode, reason, allocation)); } if (shardRouting.primary()) { @@ -128,9 +132,9 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing // handle scenarios for allocation of a new shard's primary copy private Decision primaryShardDecision(ShardRouting primaryShardRouting, DiscoveryNode targetNode, RoutingAllocation allocation) { if (targetNode.isRemoteStoreNode() == false) { - return allocation.decision(Decision.NO, NAME, getDecisionDetails(false, primaryShardRouting, targetNode, "")); + return allocation.decision(Decision.NO, NAME, getDecisionDetails(false, primaryShardRouting, targetNode, "", allocation)); } - return allocation.decision(Decision.YES, NAME, getDecisionDetails(true, primaryShardRouting, targetNode, "")); + return allocation.decision(Decision.YES, NAME, getDecisionDetails(true, primaryShardRouting, targetNode, "", allocation)); } // Checks if primary shard is on a remote node. 
@@ -150,20 +154,41 @@ private Decision replicaShardDecision(ShardRouting replicaShardRouting, Discover
             return allocation.decision(
                 Decision.NO,
                 NAME,
-                getDecisionDetails(false, replicaShardRouting, targetNode, " since primary shard copy is not yet migrated to remote")
+                getDecisionDetails(
+                    false,
+                    replicaShardRouting,
+                    targetNode,
+                    " since primary shard copy is not yet migrated to remote",
+                    allocation
+                )
             );
         }
         return allocation.decision(
             Decision.YES,
             NAME,
-            getDecisionDetails(true, replicaShardRouting, targetNode, " since primary shard copy has been migrated to remote")
+            getDecisionDetails(
+                true,
+                replicaShardRouting,
+                targetNode,
+                " since primary shard copy has been migrated to remote",
+                allocation
+            )
         );
     }
-        return allocation.decision(Decision.YES, NAME, getDecisionDetails(true, replicaShardRouting, targetNode, ""));
+        return allocation.decision(Decision.YES, NAME, getDecisionDetails(true, replicaShardRouting, targetNode, "", allocation));
     }
 
     // get detailed reason for the decision
-    private String getDecisionDetails(boolean isYes, ShardRouting shardRouting, DiscoveryNode targetNode, String reason) {
+    private String getDecisionDetails(
+        boolean isYes,
+        ShardRouting shardRouting,
+        DiscoveryNode targetNode,
+        String reason,
+        RoutingAllocation allocation
+    ) {
+        if (allocation.debugDecision() == false) {
+            return null;
+        }
         return new StringBuilder("[").append(migrationDirection.direction)
             .append(" migration_direction]: ")
             .append(shardRouting.primary() ? "primary" : "replica")

From 2166b449d99e5afd9a8d943f7245de2f2635fe5c Mon Sep 17 00:00:00 2001
From: Ashish Singh
Date: Mon, 14 Oct 2024 10:57:48 +0530
Subject: [PATCH 05/19] Optimise clone operation for incremental full cluster snapshots (#16296)

* Optimise clone operation for incremental full cluster snapshots

Signed-off-by: Ashish Singh

* Add UTs

Signed-off-by: Ashish Singh

* Add CHANGELOG

Signed-off-by: Ashish Singh

---------

Signed-off-by: Ashish Singh
---
 CHANGELOG.md                                  |   2 +-
 .../snapshots/SnapshotsService.java           | 121 ++---
 .../snapshots/SnapshotsServiceTests.java      | 439 ++++++++++++++++++
 3 files changed, 504 insertions(+), 58 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 43993cd7ca9dd..929b3561cdbb0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -55,7 +55,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Remove Identity FeatureFlag ([#16024](https://github.com/opensearch-project/OpenSearch/pull/16024))
 - Ensure RestHandler.Wrapper delegates all implementations to the wrapped handler ([#16154](https://github.com/opensearch-project/OpenSearch/pull/16154))
 - Code cleanup: Remove ApproximateIndexOrDocValuesQuery ([#16273](https://github.com/opensearch-project/OpenSearch/pull/16273))
-
+- Optimise clone operation for incremental full cluster snapshots ([#16296](https://github.com/opensearch-project/OpenSearch/pull/16296))
 
 ### Deprecated

diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java
index 541a87c200883..2df094ebe540b 100644
--- a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java
+++ b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java
@@ -1327,7 +1327,8 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS
 
     private final Set<RepositoryShardId> currentlyCloning = Collections.synchronizedSet(new HashSet<>());
 
-    private void runReadyClone(
+    // Made package-private to be able to test the method in UTs
+    void runReadyClone(
Snapshot target, SnapshotId sourceSnapshot, ShardSnapshotStatus shardStatusBefore, @@ -1351,69 +1352,75 @@ public void onFailure(Exception e) { @Override protected void doRun() { final String localNodeId = clusterService.localNode().getId(); - repository.getRepositoryData(ActionListener.wrap(repositoryData -> { - try { - final IndexMetadata indexMetadata = repository.getSnapshotIndexMetaData( - repositoryData, + if (remoteStoreIndexShallowCopy == false) { + executeClone(localNodeId, false); + } else { + repository.getRepositoryData(ActionListener.wrap(repositoryData -> { + try { + final IndexMetadata indexMetadata = repository.getSnapshotIndexMetaData( + repositoryData, + sourceSnapshot, + repoShardId.index() + ); + final boolean cloneRemoteStoreIndexShardSnapshot = indexMetadata.getSettings() + .getAsBoolean(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, false); + executeClone(localNodeId, cloneRemoteStoreIndexShardSnapshot); + } catch (IOException e) { + logger.warn("Failed to get index-metadata from repository data for index [{}]", repoShardId.index().getName()); + failCloneShardAndUpdateClusterState(target, sourceSnapshot, repoShardId); + } + }, this::onFailure)); + } + } + + private void executeClone(String localNodeId, boolean cloneRemoteStoreIndexShardSnapshot) { + if (currentlyCloning.add(repoShardId)) { + if (cloneRemoteStoreIndexShardSnapshot) { + repository.cloneRemoteStoreIndexShardSnapshot( sourceSnapshot, - repoShardId.index() + target.getSnapshotId(), + repoShardId, + shardStatusBefore.generation(), + remoteStoreLockManagerFactory, + getCloneCompletionListener(localNodeId) ); - final boolean cloneRemoteStoreIndexShardSnapshot = remoteStoreIndexShallowCopy - && indexMetadata.getSettings().getAsBoolean(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, false); - final SnapshotId targetSnapshot = target.getSnapshotId(); - final ActionListener listener = ActionListener.wrap( - generation -> innerUpdateSnapshotState( - new ShardSnapshotUpdate( - target, - repoShardId, - new ShardSnapshotStatus(localNodeId, ShardState.SUCCESS, generation) - ), - ActionListener.runBefore( - ActionListener.wrap( - v -> logger.trace( - "Marked [{}] as successfully cloned from [{}] to [{}]", - repoShardId, - sourceSnapshot, - targetSnapshot - ), - e -> { - logger.warn("Cluster state update after successful shard clone [{}] failed", repoShardId); - failAllListenersOnMasterFailOver(e); - } - ), - () -> currentlyCloning.remove(repoShardId) - ) - ), - e -> { - logger.warn("Exception [{}] while trying to clone shard [{}]", e, repoShardId); - failCloneShardAndUpdateClusterState(target, sourceSnapshot, repoShardId); - } + } else { + repository.cloneShardSnapshot( + sourceSnapshot, + target.getSnapshotId(), + repoShardId, + shardStatusBefore.generation(), + getCloneCompletionListener(localNodeId) ); - if (currentlyCloning.add(repoShardId)) { - if (cloneRemoteStoreIndexShardSnapshot) { - repository.cloneRemoteStoreIndexShardSnapshot( - sourceSnapshot, - targetSnapshot, + } + } + } + + private ActionListener getCloneCompletionListener(String localNodeId) { + return ActionListener.wrap( + generation -> innerUpdateSnapshotState( + new ShardSnapshotUpdate(target, repoShardId, new ShardSnapshotStatus(localNodeId, ShardState.SUCCESS, generation)), + ActionListener.runBefore( + ActionListener.wrap( + v -> logger.trace( + "Marked [{}] as successfully cloned from [{}] to [{}]", repoShardId, - shardStatusBefore.generation(), - remoteStoreLockManagerFactory, - listener - ); - } else { - repository.cloneShardSnapshot( 
sourceSnapshot, - targetSnapshot, - repoShardId, - shardStatusBefore.generation(), - listener - ); - } - } - } catch (IOException e) { - logger.warn("Failed to get index-metadata from repository data for index [{}]", repoShardId.index().getName()); + target.getSnapshotId() + ), + e -> { + logger.warn("Cluster state update after successful shard clone [{}] failed", repoShardId); + failAllListenersOnMasterFailOver(e); + } + ), + () -> currentlyCloning.remove(repoShardId) + ) + ), + e -> { + logger.warn("Exception [{}] while trying to clone shard [{}]", e, repoShardId); failCloneShardAndUpdateClusterState(target, sourceSnapshot, repoShardId); } - }, this::onFailure)); + ); } }); } diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotsServiceTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotsServiceTests.java index 1652ba29b8fa7..e374636f60d22 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotsServiceTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotsServiceTests.java @@ -33,24 +33,41 @@ package org.opensearch.snapshots; import org.opensearch.Version; +import org.opensearch.action.support.ActionFilters; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.SnapshotsInProgress; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.routing.IndexRoutingTable; import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.RoutingTable; import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.cluster.routing.TestShardRouting; +import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.UUIDs; +import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.concurrent.OpenSearchExecutors; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.repositories.IndexId; +import org.opensearch.repositories.IndexMetaDataGenerations; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.Repository; +import org.opensearch.repositories.RepositoryData; +import org.opensearch.repositories.RepositoryException; import org.opensearch.repositories.RepositoryShardId; +import org.opensearch.repositories.ShardGenerations; import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; +import java.io.IOException; import java.util.Arrays; import java.util.Collections; import java.util.List; @@ -58,8 +75,17 @@ import java.util.stream.Collectors; import java.util.stream.StreamSupport; +import org.mockito.ArgumentCaptor; + import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_VERSION_CREATED; import static org.hamcrest.Matchers.is; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; public class 
SnapshotsServiceTests extends OpenSearchTestCase { @@ -404,6 +430,419 @@ public void testCompletedCloneStartsNextClone() throws Exception { assertIsNoop(updatedClusterState, completeShardClone); } + /** + * Tests the runReadyClone method when remote store index shallow copy is disabled. + * It verifies that: + * 1. getRepositoryData is never called + * 2. cloneShardSnapshot is called with the correct parameters + */ + public void testRunReadyCloneWithRemoteStoreIndexShallowCopyDisabled() throws Exception { + String repoName = "test-repo"; + Snapshot target = snapshot(repoName, "target-snapshot"); + SnapshotId sourceSnapshot = new SnapshotId("source-snapshot", uuid()); + RepositoryShardId repoShardId = new RepositoryShardId(indexId("test-index"), 0); + SnapshotsInProgress.ShardSnapshotStatus shardStatusBefore = initShardStatus(uuid()); + + Repository mockRepository = mock(Repository.class); + try (SnapshotsService snapshotsService = createSnapshotsService()) { + snapshotsService.runReadyClone(target, sourceSnapshot, shardStatusBefore, repoShardId, mockRepository, false); + verify(mockRepository, never()).getRepositoryData(any()); + verify(mockRepository).cloneShardSnapshot( + eq(sourceSnapshot), + eq(target.getSnapshotId()), + eq(repoShardId), + eq(shardStatusBefore.generation()), + any() + ); + } + } + + /** + * Tests the runReadyClone method when remote store index shallow copy is enabled. + * It verifies that: + * 1. getRepositoryData is called + * 2. cloneRemoteStoreIndexShardSnapshot is called with the correct parameters + * This test simulates a scenario where the index has remote store enabled. + */ + + public void testRunReadyCloneWithRemoteStoreIndexShallowCopyEnabled() throws Exception { + String repoName = "test-repo"; + Snapshot target = snapshot(repoName, "target-snapshot"); + SnapshotId sourceSnapshot = new SnapshotId("source-snapshot", uuid()); + RepositoryShardId repoShardId = new RepositoryShardId(indexId("test-index"), 0); + SnapshotsInProgress.ShardSnapshotStatus shardStatusBefore = initShardStatus(uuid()); + + Repository mockRepository = mock(Repository.class); + + // Create a real RepositoryData instance + RepositoryData repositoryData = new RepositoryData( + RepositoryData.EMPTY_REPO_GEN, + Collections.singletonMap(sourceSnapshot.getName(), sourceSnapshot), + Collections.emptyMap(), + Collections.emptyMap(), + Collections.emptyMap(), + ShardGenerations.EMPTY, + IndexMetaDataGenerations.EMPTY + ); + + // Mock the getRepositoryData method to use the ActionListener + doAnswer(invocation -> { + ActionListener listener = invocation.getArgument(0); + listener.onResponse(repositoryData); + return null; + }).when(mockRepository).getRepositoryData(any(ActionListener.class)); + + IndexMetadata mockIndexMetadata = mock(IndexMetadata.class); + when(mockRepository.getSnapshotIndexMetaData(eq(repositoryData), eq(sourceSnapshot), any())).thenReturn(mockIndexMetadata); + + Settings mockSettings = Settings.builder().put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true).build(); + when(mockIndexMetadata.getSettings()).thenReturn(mockSettings); + + try (SnapshotsService snapshotsService = createSnapshotsService()) { + snapshotsService.runReadyClone(target, sourceSnapshot, shardStatusBefore, repoShardId, mockRepository, true); + + // Verify that getRepositoryData was called + verify(mockRepository).getRepositoryData(any(ActionListener.class)); + + // Verify that cloneRemoteStoreIndexShardSnapshot was called with the correct arguments + 
verify(mockRepository).cloneRemoteStoreIndexShardSnapshot( + eq(sourceSnapshot), + eq(target.getSnapshotId()), + eq(repoShardId), + eq(shardStatusBefore.generation()), + any(), + any() + ); + } + } + + /** + * Tests the error handling in runReadyClone when a RepositoryException occurs. + * It verifies that: + * 1. getRepositoryData is called and throws an exception + * 2. Neither cloneShardSnapshot nor cloneRemoteStoreIndexShardSnapshot are called + */ + public void testRunReadyCloneWithRepositoryException() throws Exception { + String repoName = "test-repo"; + Snapshot target = snapshot(repoName, "target-snapshot"); + SnapshotId sourceSnapshot = new SnapshotId("source-snapshot", uuid()); + RepositoryShardId repoShardId = new RepositoryShardId(indexId("test-index"), 0); + SnapshotsInProgress.ShardSnapshotStatus shardStatusBefore = initShardStatus(uuid()); + + Repository mockRepository = mock(Repository.class); + + // Mock the getRepositoryData method to throw an exception + doAnswer(invocation -> { + ActionListener listener = invocation.getArgument(0); + listener.onFailure(new RepositoryException(repoName, "Test exception")); + return null; + }).when(mockRepository).getRepositoryData(any(ActionListener.class)); + + try (SnapshotsService snapshotsService = createSnapshotsService()) { + snapshotsService.runReadyClone(target, sourceSnapshot, shardStatusBefore, repoShardId, mockRepository, true); + + // Verify that getRepositoryData was called + verify(mockRepository).getRepositoryData(any(ActionListener.class)); + + // Verify that neither cloneShardSnapshot nor cloneRemoteStoreIndexShardSnapshot were called + verify(mockRepository, never()).cloneShardSnapshot(any(), any(), any(), any(), any()); + verify(mockRepository, never()).cloneRemoteStoreIndexShardSnapshot(any(), any(), any(), any(), any(), any()); + } + } + + /** + * Tests the runReadyClone method when remote store index shallow copy is globally enabled, + * but disabled for the specific index. + * It verifies that: + * 1. getRepositoryData is called + * 2. 
cloneShardSnapshot is called instead of cloneRemoteStoreIndexShardSnapshot + */ + public void testRunReadyCloneWithRemoteStoreIndexShallowCopyEnabledButIndexDisabled() throws Exception { + String repoName = "test-repo"; + Snapshot target = snapshot(repoName, "target-snapshot"); + SnapshotId sourceSnapshot = new SnapshotId("source-snapshot", uuid()); + RepositoryShardId repoShardId = new RepositoryShardId(indexId("test-index"), 0); + SnapshotsInProgress.ShardSnapshotStatus shardStatusBefore = initShardStatus(uuid()); + + Repository mockRepository = mock(Repository.class); + + RepositoryData repositoryData = new RepositoryData( + RepositoryData.EMPTY_REPO_GEN, + Collections.singletonMap(sourceSnapshot.getName(), sourceSnapshot), + Collections.emptyMap(), + Collections.emptyMap(), + Collections.emptyMap(), + ShardGenerations.EMPTY, + IndexMetaDataGenerations.EMPTY + ); + + doAnswer(invocation -> { + ActionListener listener = invocation.getArgument(0); + listener.onResponse(repositoryData); + return null; + }).when(mockRepository).getRepositoryData(any(ActionListener.class)); + + IndexMetadata mockIndexMetadata = mock(IndexMetadata.class); + when(mockRepository.getSnapshotIndexMetaData(eq(repositoryData), eq(sourceSnapshot), any())).thenReturn(mockIndexMetadata); + + Settings mockSettings = Settings.builder().put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, false).build(); + when(mockIndexMetadata.getSettings()).thenReturn(mockSettings); + + try (SnapshotsService snapshotsService = createSnapshotsService()) { + snapshotsService.runReadyClone(target, sourceSnapshot, shardStatusBefore, repoShardId, mockRepository, true); + + verify(mockRepository).getRepositoryData(any(ActionListener.class)); + verify(mockRepository).cloneShardSnapshot( + eq(sourceSnapshot), + eq(target.getSnapshotId()), + eq(repoShardId), + eq(shardStatusBefore.generation()), + any() + ); + } + } + + /** + * Tests the error handling in runReadyClone when an IOException occurs while getting snapshot index metadata. + * It verifies that: + * 1. getRepositoryData is called + * 2. getSnapshotIndexMetaData throws an IOException + * 3. 
Neither cloneShardSnapshot nor cloneRemoteStoreIndexShardSnapshot are called + */ + public void testRunReadyCloneWithIOException() throws Exception { + String repoName = "test-repo"; + Snapshot target = snapshot(repoName, "target-snapshot"); + SnapshotId sourceSnapshot = new SnapshotId("source-snapshot", uuid()); + RepositoryShardId repoShardId = new RepositoryShardId(indexId("test-index"), 0); + SnapshotsInProgress.ShardSnapshotStatus shardStatusBefore = initShardStatus(uuid()); + + Repository mockRepository = mock(Repository.class); + + RepositoryData repositoryData = new RepositoryData( + RepositoryData.EMPTY_REPO_GEN, + Collections.singletonMap(sourceSnapshot.getName(), sourceSnapshot), + Collections.emptyMap(), + Collections.emptyMap(), + Collections.emptyMap(), + ShardGenerations.EMPTY, + IndexMetaDataGenerations.EMPTY + ); + + doAnswer(invocation -> { + ActionListener listener = invocation.getArgument(0); + listener.onResponse(repositoryData); + return null; + }).when(mockRepository).getRepositoryData(any(ActionListener.class)); + + when(mockRepository.getSnapshotIndexMetaData(eq(repositoryData), eq(sourceSnapshot), any())).thenThrow( + new IOException("Test IO Exception") + ); + + try (SnapshotsService snapshotsService = createSnapshotsService()) { + snapshotsService.runReadyClone(target, sourceSnapshot, shardStatusBefore, repoShardId, mockRepository, true); + + verify(mockRepository).getRepositoryData(any(ActionListener.class)); + verify(mockRepository).getSnapshotIndexMetaData(eq(repositoryData), eq(sourceSnapshot), any()); + verify(mockRepository, never()).cloneShardSnapshot(any(), any(), any(), any(), any()); + verify(mockRepository, never()).cloneRemoteStoreIndexShardSnapshot(any(), any(), any(), any(), any(), any()); + } + } + + /** + * Tests the completion listener functionality in runReadyClone when the operation is successful. + * It verifies that: + * 1. The clone operation completes successfully + * 2. 
The correct ShardSnapshotUpdate is submitted to the cluster state + */ + public void testRunReadyCloneCompletionListener() throws Exception { + String repoName = "test-repo"; + Snapshot target = snapshot(repoName, "target-snapshot"); + SnapshotId sourceSnapshot = new SnapshotId("source-snapshot", uuid()); + RepositoryShardId repoShardId = new RepositoryShardId(indexId("test-index"), 0); + SnapshotsInProgress.ShardSnapshotStatus shardStatusBefore = initShardStatus(uuid()); + + Repository mockRepository = mock(Repository.class); + + RepositoryData repositoryData = new RepositoryData( + RepositoryData.EMPTY_REPO_GEN, + Collections.singletonMap(sourceSnapshot.getName(), sourceSnapshot), + Collections.emptyMap(), + Collections.emptyMap(), + Collections.emptyMap(), + ShardGenerations.EMPTY, + IndexMetaDataGenerations.EMPTY + ); + + doAnswer(invocation -> { + ActionListener listener = invocation.getArgument(0); + listener.onResponse(repositoryData); + return null; + }).when(mockRepository).getRepositoryData(any(ActionListener.class)); + + IndexMetadata mockIndexMetadata = mock(IndexMetadata.class); + when(mockRepository.getSnapshotIndexMetaData(eq(repositoryData), eq(sourceSnapshot), any())).thenReturn(mockIndexMetadata); + + Settings mockSettings = Settings.builder().put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true).build(); + when(mockIndexMetadata.getSettings()).thenReturn(mockSettings); + + String newGeneration = "new_generation"; + doAnswer(invocation -> { + ActionListener listener = invocation.getArgument(5); + listener.onResponse(newGeneration); + return null; + }).when(mockRepository).cloneRemoteStoreIndexShardSnapshot(any(), any(), any(), any(), any(), any(ActionListener.class)); + + ClusterService mockClusterService = mock(ClusterService.class); + DiscoveryNode localNode = new DiscoveryNode("local", buildNewFakeTransportAddress(), Version.CURRENT); + when(mockClusterService.localNode()).thenReturn(localNode); + + SnapshotsService snapshotsService = createSnapshotsService(mockClusterService); + + snapshotsService.runReadyClone(target, sourceSnapshot, shardStatusBefore, repoShardId, mockRepository, true); + + verify(mockRepository).getRepositoryData(any(ActionListener.class)); + verify(mockRepository).cloneRemoteStoreIndexShardSnapshot( + eq(sourceSnapshot), + eq(target.getSnapshotId()), + eq(repoShardId), + eq(shardStatusBefore.generation()), + any(), + any() + ); + + // Verify that innerUpdateSnapshotState was called with the correct arguments + ArgumentCaptor updateCaptor = ArgumentCaptor.forClass( + SnapshotsService.ShardSnapshotUpdate.class + ); + verify(mockClusterService).submitStateUpdateTask(eq("update snapshot state"), updateCaptor.capture(), any(), any(), any()); + + SnapshotsService.ShardSnapshotUpdate capturedUpdate = updateCaptor.getValue(); + SnapshotsInProgress.ShardSnapshotStatus expectedStatus = new SnapshotsInProgress.ShardSnapshotStatus( + localNode.getId(), + SnapshotsInProgress.ShardState.SUCCESS, + newGeneration + ); + SnapshotsService.ShardSnapshotUpdate expectedUpdate = new SnapshotsService.ShardSnapshotUpdate(target, repoShardId, expectedStatus); + assertEquals(expectedUpdate.hashCode(), capturedUpdate.hashCode()); + } + + /** + * Tests the completion listener functionality in runReadyClone when the operation fails. + * It verifies that: + * 1. The clone operation fails with an exception + * 2. 
The correct failed ShardSnapshotUpdate is submitted to the cluster state + */ + public void testRunReadyCloneCompletionListenerFailure() throws Exception { + String repoName = "test-repo"; + Snapshot target = snapshot(repoName, "target-snapshot"); + SnapshotId sourceSnapshot = new SnapshotId("source-snapshot", uuid()); + RepositoryShardId repoShardId = new RepositoryShardId(indexId("test-index"), 0); + SnapshotsInProgress.ShardSnapshotStatus shardStatusBefore = initShardStatus(uuid()); + + Repository mockRepository = mock(Repository.class); + + RepositoryData repositoryData = new RepositoryData( + RepositoryData.EMPTY_REPO_GEN, + Collections.singletonMap(sourceSnapshot.getName(), sourceSnapshot), + Collections.emptyMap(), + Collections.emptyMap(), + Collections.emptyMap(), + ShardGenerations.EMPTY, + IndexMetaDataGenerations.EMPTY + ); + + doAnswer(invocation -> { + ActionListener listener = invocation.getArgument(0); + listener.onResponse(repositoryData); + return null; + }).when(mockRepository).getRepositoryData(any(ActionListener.class)); + + IndexMetadata mockIndexMetadata = mock(IndexMetadata.class); + when(mockRepository.getSnapshotIndexMetaData(eq(repositoryData), eq(sourceSnapshot), any())).thenReturn(mockIndexMetadata); + + Settings mockSettings = Settings.builder().put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true).build(); + when(mockIndexMetadata.getSettings()).thenReturn(mockSettings); + + Exception testException = new RuntimeException("Test exception"); + doAnswer(invocation -> { + ActionListener listener = invocation.getArgument(5); + listener.onFailure(testException); + return null; + }).when(mockRepository).cloneRemoteStoreIndexShardSnapshot(any(), any(), any(), any(), any(), any(ActionListener.class)); + + ClusterService mockClusterService = mock(ClusterService.class); + DiscoveryNode localNode = new DiscoveryNode("local", buildNewFakeTransportAddress(), Version.CURRENT); + when(mockClusterService.localNode()).thenReturn(localNode); + + SnapshotsService snapshotsService = createSnapshotsService(mockClusterService); + + snapshotsService.runReadyClone(target, sourceSnapshot, shardStatusBefore, repoShardId, mockRepository, true); + + verify(mockRepository).getRepositoryData(any(ActionListener.class)); + verify(mockRepository).cloneRemoteStoreIndexShardSnapshot( + eq(sourceSnapshot), + eq(target.getSnapshotId()), + eq(repoShardId), + eq(shardStatusBefore.generation()), + any(), + any() + ); + + ArgumentCaptor updateCaptor = ArgumentCaptor.forClass( + SnapshotsService.ShardSnapshotUpdate.class + ); + verify(mockClusterService).submitStateUpdateTask(eq("update snapshot state"), updateCaptor.capture(), any(), any(), any()); + + SnapshotsService.ShardSnapshotUpdate capturedUpdate = updateCaptor.getValue(); + SnapshotsInProgress.ShardSnapshotStatus expectedStatus = new SnapshotsInProgress.ShardSnapshotStatus( + localNode.getId(), + SnapshotsInProgress.ShardState.FAILED, + "failed to clone shard snapshot", + null + ); + SnapshotsService.ShardSnapshotUpdate expectedUpdate = new SnapshotsService.ShardSnapshotUpdate(target, repoShardId, expectedStatus); + assertEquals(expectedUpdate.hashCode(), capturedUpdate.hashCode()); + } + + /** + * Helper method to create a SnapshotsService instance with a provided ClusterService. + * This method mocks all necessary dependencies for the SnapshotsService. 
+ */ + private SnapshotsService createSnapshotsService(ClusterService mockClusterService) { + ThreadPool mockThreadPool = mock(ThreadPool.class); + when(mockThreadPool.executor(ThreadPool.Names.SNAPSHOT)).thenReturn(OpenSearchExecutors.newDirectExecutorService()); + + RepositoriesService mockRepoService = mock(RepositoriesService.class); + TransportService mockTransportService = mock(TransportService.class); + when(mockTransportService.getThreadPool()).thenReturn(mockThreadPool); + + DiscoveryNode localNode = new DiscoveryNode("local", buildNewFakeTransportAddress(), Version.CURRENT); + when(mockClusterService.localNode()).thenReturn(localNode); + + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + when(mockClusterService.getClusterSettings()).thenReturn(clusterSettings); + + return new SnapshotsService( + Settings.EMPTY, + mockClusterService, + mock(IndexNameExpressionResolver.class), + mockRepoService, + mockTransportService, + mock(ActionFilters.class), + null, + mock(RemoteStoreSettings.class) + ); + } + + /** + * Helper method to create a SnapshotsService instance with a mocked ClusterService. + * This method is a convenience wrapper around createSnapshotsService(ClusterService). + */ + private SnapshotsService createSnapshotsService() { + ClusterService mockClusterService = mock(ClusterService.class); + return createSnapshotsService(mockClusterService); + } + private static DiscoveryNodes discoveryNodes(String localNodeId) { return DiscoveryNodes.builder().localNodeId(localNodeId).build(); } From 55e98ed2735d898d884295c00f869f5f3278c222 Mon Sep 17 00:00:00 2001 From: gaobinlong Date: Mon, 14 Oct 2024 21:00:49 +0800 Subject: [PATCH 06/19] Fix multi-search with template doesn't return status code (#16265) * Fix multi-search with template doesn't return status code Signed-off-by: Gao Binlong * Modify changelog Signed-off-by: Gao Binlong --------- Signed-off-by: Gao Binlong --- CHANGELOG.md | 1 + .../mustache/MultiSearchTemplateResponse.java | 3 + .../mustache/SearchTemplateResponse.java | 5 +- .../MultiSearchTemplateResponseTests.java | 79 +++++++++++++++++++ .../mustache/SearchTemplateResponseTests.java | 1 + .../50_multi_search_template.yml | 48 +++++++++++ 6 files changed, 136 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 929b3561cdbb0..bc6c8b024c692 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -73,6 +73,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Streaming bulk request hangs ([#16158](https://github.com/opensearch-project/OpenSearch/pull/16158)) - Fix warnings from SLF4J on startup when repository-s3 is installed ([#16194](https://github.com/opensearch-project/OpenSearch/pull/16194)) - Fix protobuf-java leak through client library dependencies ([#16254](https://github.com/opensearch-project/OpenSearch/pull/16254)) +- Fix multi-search with template doesn't return status code ([#16265](https://github.com/opensearch-project/OpenSearch/pull/16265)) ### Security diff --git a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MultiSearchTemplateResponse.java b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MultiSearchTemplateResponse.java index ead93158b421c..a84dd85de98ff 100644 --- a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MultiSearchTemplateResponse.java +++ b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MultiSearchTemplateResponse.java @@ -32,6 +32,7 @@ package 
org.opensearch.script.mustache; +import org.opensearch.ExceptionsHelper; import org.opensearch.OpenSearchException; import org.opensearch.action.search.MultiSearchResponse; import org.opensearch.common.Nullable; @@ -167,6 +168,7 @@ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params par if (item.isFailure()) { builder.startObject(); OpenSearchException.generateFailureXContent(builder, params, item.getFailure(), true); + builder.field(Fields.STATUS, ExceptionsHelper.status(item.getFailure()).getStatus()); builder.endObject(); } else { item.getResponse().toXContent(builder, params); @@ -179,6 +181,7 @@ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params par static final class Fields { static final String RESPONSES = "responses"; + static final String STATUS = "status"; } public static MultiSearchTemplateResponse fromXContext(XContentParser parser) { diff --git a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/SearchTemplateResponse.java b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/SearchTemplateResponse.java index 9cb6ac127786a..534b70cf80f73 100644 --- a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/SearchTemplateResponse.java +++ b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/SearchTemplateResponse.java @@ -120,7 +120,10 @@ public static SearchTemplateResponse fromXContent(XContentParser parser) throws @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { if (hasResponse()) { - response.toXContent(builder, params); + builder.startObject(); + response.innerToXContent(builder, params); + builder.field(MultiSearchTemplateResponse.Fields.STATUS, response.status().getStatus()); + builder.endObject(); } else { builder.startObject(); // we can assume the template is always json as we convert it before compiling it diff --git a/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/MultiSearchTemplateResponseTests.java b/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/MultiSearchTemplateResponseTests.java index aeb7cdc2c6a28..ffd282e4fedc0 100644 --- a/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/MultiSearchTemplateResponseTests.java +++ b/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/MultiSearchTemplateResponseTests.java @@ -34,8 +34,12 @@ import org.opensearch.OpenSearchException; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.search.ShardSearchFailure; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.search.internal.InternalSearchResponse; import org.opensearch.test.AbstractXContentTestCase; @@ -44,6 +48,7 @@ import java.util.function.Predicate; import java.util.function.Supplier; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertToXContentEquivalent; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; @@ -177,4 +182,78 @@ public void testFromXContentWithFailures() throws IOException { ToXContent.EMPTY_PARAMS ); } + + public void testToXContentWithFailures() throws 
Exception { + long overallTookInMillis = randomNonNegativeLong(); + MultiSearchTemplateResponse.Item[] items = new MultiSearchTemplateResponse.Item[2]; + + long tookInMillis = 1L; + int totalShards = 2; + int successfulShards = 2; + int skippedShards = 0; + InternalSearchResponse internalSearchResponse = InternalSearchResponse.empty(); + SearchTemplateResponse searchTemplateResponse = new SearchTemplateResponse(); + SearchResponse searchResponse = new SearchResponse( + internalSearchResponse, + null, + totalShards, + successfulShards, + skippedShards, + tookInMillis, + ShardSearchFailure.EMPTY_ARRAY, + SearchResponse.Clusters.EMPTY + ); + searchTemplateResponse.setResponse(searchResponse); + items[0] = new MultiSearchTemplateResponse.Item(searchTemplateResponse, null); + + items[1] = new MultiSearchTemplateResponse.Item(null, new IllegalArgumentException("Invalid argument")); + + MultiSearchTemplateResponse response = new MultiSearchTemplateResponse(items, overallTookInMillis); + + XContentType contentType = randomFrom(XContentType.values()); + XContentBuilder expectedResponse = MediaTypeRegistry.contentBuilder(contentType) + .startObject() + .field("took", overallTookInMillis) + .startArray("responses") + .startObject() + .field("took", 1) + .field("timed_out", false) + .startObject("_shards") + .field("total", 2) + .field("successful", 2) + .field("skipped", 0) + .field("failed", 0) + .endObject() + .startObject("hits") + .startObject("total") + .field("value", 0) + .field("relation", "eq") + .endObject() + .field("max_score", 0.0F) + .startArray("hits") + .endArray() + .endObject() + .field("status", 200) + .endObject() + .startObject() + .startObject("error") + .field("type", "illegal_argument_exception") + .field("reason", "Invalid argument") + .startArray("root_cause") + .startObject() + .field("type", "illegal_argument_exception") + .field("reason", "Invalid argument") + .endObject() + .endArray() + .endObject() + .field("status", 400) + .endObject() + .endArray() + .endObject(); + + XContentBuilder actualResponse = MediaTypeRegistry.contentBuilder(contentType); + response.toXContent(actualResponse, ToXContent.EMPTY_PARAMS); + + assertToXContentEquivalent(BytesReference.bytes(expectedResponse), BytesReference.bytes(actualResponse), contentType); + } } diff --git a/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/SearchTemplateResponseTests.java b/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/SearchTemplateResponseTests.java index c2685e45ecb6b..c00751d8ef758 100644 --- a/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/SearchTemplateResponseTests.java +++ b/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/SearchTemplateResponseTests.java @@ -234,6 +234,7 @@ public void testSearchResponseToXContent() throws IOException { .endObject() .endArray() .endObject() + .field("status", 200) .endObject(); XContentBuilder actualResponse = MediaTypeRegistry.contentBuilder(contentType); diff --git a/modules/lang-mustache/src/yamlRestTest/resources/rest-api-spec/test/lang_mustache/50_multi_search_template.yml b/modules/lang-mustache/src/yamlRestTest/resources/rest-api-spec/test/lang_mustache/50_multi_search_template.yml index e92e10b9ad276..2b99fa633e640 100644 --- a/modules/lang-mustache/src/yamlRestTest/resources/rest-api-spec/test/lang_mustache/50_multi_search_template.yml +++ b/modules/lang-mustache/src/yamlRestTest/resources/rest-api-spec/test/lang_mustache/50_multi_search_template.yml @@ -205,3 +205,51 @@ setup: - 
match: { responses.1.hits.total.relation: eq } - match: { responses.2.hits.total.value: 1 } - match: { responses.1.hits.total.relation: eq } + +--- +"Test multi-search template returns status code": + - skip: + version: " - 2.17.99" + reason: Fixed in 2.18.0. + - do: + msearch_template: + rest_total_hits_as_int: true + body: + # Search 0 is OK + - index: index_* + - source: '{"query": {"match": {"foo": "{{value}}"} } }' + params: + value: "foo" + # Search 1 has an unclosed JSON template + - index: index_* + - source: '{"query": {"match": {' + params: + field: "foo" + value: "bar" + # Search 2 is OK + - index: _all + - source: + query: + match: {foo: "{{text}}"} + params: + text: "baz" + # Search 3 has an unknown query type + - index: index_* + - source: '{"query": {"{{query_type}}": {} }' # Unknown query type + params: + query_type: "unknown" + # Search 4 has an unsupported track_total_hits + - index: index_* + - source: '{"query": {"match_all": {} }, "track_total_hits": "{{trackTotalHits}}" }' + params: + trackTotalHits: 1 + # Search 5 has an unknown index + - index: unknown_index + - source: '{"query": {"match_all": {} } }' + + - match: { responses.0.status: 200 } + - match: { responses.1.status: 400 } + - match: { responses.2.status: 200 } + - match: { responses.3.status: 400 } + - match: { responses.4.status: 400 } + - match: { responses.5.status: 404 } From b5dcde37b5b6122d7d8533a7aa10920d62c42ea0 Mon Sep 17 00:00:00 2001 From: Gaurav Bafna <85113518+gbbafna@users.noreply.github.com> Date: Mon, 14 Oct 2024 18:40:27 +0530 Subject: [PATCH 07/19] [Snapshot V2] Move timestamp pinning before cluster state update (#16269) * Move timestamp pinning before cluster state update Signed-off-by: Gaurav Bafna * Address PR Comments Signed-off-by: Gaurav Bafna * Fix IT Signed-off-by: Gaurav Bafna --------- Signed-off-by: Gaurav Bafna --- .../remotestore/RemoteRestoreSnapshotIT.java | 27 ++-- .../snapshots/SnapshotsService.java | 149 ++++++++---------- 2 files changed, 80 insertions(+), 96 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java index 927dbf9995778..70e283791fc3e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java @@ -938,17 +938,8 @@ public void testConcurrentSnapshotV2CreateOperation() throws InterruptedExceptio Thread thread = new Thread(() -> { try { String snapshotName = "snapshot-concurrent-" + snapshotIndex; - CreateSnapshotResponse createSnapshotResponse2 = client().admin() - .cluster() - .prepareCreateSnapshot(snapshotRepoName, snapshotName) - .setWaitForCompletion(true) - .get(); - SnapshotInfo snapshotInfo = createSnapshotResponse2.getSnapshotInfo(); - assertThat(snapshotInfo.state(), equalTo(SnapshotState.SUCCESS)); - assertThat(snapshotInfo.successfulShards(), greaterThan(0)); - assertThat(snapshotInfo.successfulShards(), equalTo(snapshotInfo.totalShards())); - assertThat(snapshotInfo.snapshotId().getName(), equalTo(snapshotName)); - assertThat(snapshotInfo.getPinnedTimestamp(), greaterThan(0L)); + client().admin().cluster().prepareCreateSnapshot(snapshotRepoName, snapshotName).setWaitForCompletion(true).get(); + logger.info("Snapshot completed {}", snapshotName); } catch (Exception e) {} }); threads.add(thread); @@ -963,15 +954,19 @@ public void 
testConcurrentSnapshotV2CreateOperation() throws InterruptedExceptio
 thread.join();
 }
- // Validate that only one snapshot has been created
+ // Sleeping 10 sec for earlier created snapshot to complete runNextQueuedOperation and be ready for next snapshot
+ // We can't put `waitFor` since we don't have visibility on its completion
+ Thread.sleep(TimeValue.timeValueSeconds(10).millis());
+ client().admin().cluster().prepareCreateSnapshot(snapshotRepoName, "snapshot-cleanup-timestamp").setWaitForCompletion(true).get();
 Repository repository = internalCluster().getInstance(RepositoriesService.class).repository(snapshotRepoName);
 PlainActionFuture<RepositoryData> repositoryDataPlainActionFuture = new PlainActionFuture<>();
 repository.getRepositoryData(repositoryDataPlainActionFuture);
 RepositoryData repositoryData = repositoryDataPlainActionFuture.get();
- assertThat(repositoryData.getSnapshotIds().size(), greaterThanOrEqualTo(1));
- forceSyncPinnedTimestamps();
- assertEquals(RemoteStorePinnedTimestampService.getPinnedEntities().size(), repositoryData.getSnapshotIds().size());
+ assertThat(repositoryData.getSnapshotIds().size(), greaterThanOrEqualTo(2));
+ waitUntil(() -> {
+ forceSyncPinnedTimestamps();
+ return RemoteStorePinnedTimestampService.getPinnedEntities().size() == repositoryData.getSnapshotIds().size();
+ });
 }

 public void testConcurrentSnapshotV2CreateOperation_MasterChange() throws Exception {
diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java
index 2df094ebe540b..6688c7dd0431a 100644
--- a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java
+++ b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java
@@ -79,6 +79,7 @@ import org.opensearch.cluster.service.ClusterService;
 import org.opensearch.common.Nullable;
 import org.opensearch.common.Priority;
+import org.opensearch.common.SetOnce;
 import org.opensearch.common.UUIDs;
 import org.opensearch.common.collect.Tuple;
 import org.opensearch.common.lifecycle.AbstractLifecycleComponent;
@@ -466,34 +467,35 @@ public TimeValue timeout() {
 * @param listener snapshot creation listener
 */
 public void createSnapshotV2(final CreateSnapshotRequest request, final ActionListener<SnapshotInfo> listener) {
- long pinnedTimestamp = System.currentTimeMillis();
 final String repositoryName = request.repository();
 final String snapshotName = indexNameExpressionResolver.resolveDateMathExpression(request.snapshot());
+ validate(repositoryName, snapshotName);
+
+ final SnapshotId snapshotId = new SnapshotId(snapshotName, UUIDs.randomBase64UUID()); // new UUID for the snapshot
+ Snapshot snapshot = new Snapshot(repositoryName, snapshotId);
+ long pinnedTimestamp = System.currentTimeMillis();
+ try {
+ updateSnapshotPinnedTimestamp(snapshot, pinnedTimestamp);
+ } catch (Exception e) {
+ listener.onFailure(e);
+ return;
+ }
 Repository repository = repositoriesService.repository(repositoryName);
- validate(repositoryName, snapshotName);
 repository.executeConsistentStateUpdate(repositoryData -> new ClusterStateUpdateTask(Priority.URGENT) {
 private SnapshotsInProgress.Entry newEntry;
-
- private SnapshotId snapshotId;
-
- private Snapshot snapshot;
-
 boolean enteredLoop;
 @Override
 public ClusterState execute(ClusterState currentState) {
 // move to in progress
- snapshotId = new SnapshotId(snapshotName, UUIDs.randomBase64UUID()); // new UUID for the snapshot
 Repository repository = repositoriesService.repository(repositoryName);
- if (repository.isReadOnly()) {
 listener.onFailure(
 new RepositoryException(repository.getMetadata().name(), "cannot create snapshot-v2 in a readonly repository")
 );
 }
- snapshot = new Snapshot(repositoryName, snapshotId);
 final Map<String, Object> userMeta = repository.adaptUserMetadata(request.userMetadata());
 createSnapshotPreValidations(currentState, repositoryData, repositoryName, snapshotName);
@@ -593,59 +595,46 @@ public void clusterStateProcessed(String source, ClusterState oldState, final Cl
 pinnedTimestamp
 );
 final Version version = minCompatibleVersion(newState.nodes().getMinNodeVersion(), repositoryData, null);
- final StepListener<RepositoryData> pinnedTimestampListener = new StepListener<>();
- pinnedTimestampListener.whenComplete(repoData -> {
- repository.finalizeSnapshot(
- shardGenerations,
- repositoryData.getGenId(),
- metadataForSnapshot(newState.metadata(), request.includeGlobalState(), false, dataStreams, newEntry.indices()),
- snapshotInfo,
- version,
- state -> stateWithoutSnapshot(state, snapshot),
- Priority.IMMEDIATE,
- new ActionListener<RepositoryData>() {
- @Override
- public void onResponse(RepositoryData repositoryData) {
- if (clusterService.state().nodes().isLocalNodeElectedClusterManager() == false) {
- leaveRepoLoop(repositoryName);
- failSnapshotCompletionListeners(
- snapshot,
- new SnapshotException(snapshot, "Aborting snapshot-v2, no longer cluster manager")
- );
- listener.onFailure(
- new SnapshotException(
- repositoryName,
- snapshotName,
- "Aborting snapshot-v2, no longer cluster manager"
- )
- );
- return;
- }
- listener.onResponse(snapshotInfo);
- // For snapshot-v2, we don't allow concurrent snapshots . But meanwhile non-v2 snapshot operations
- // can get queued . This is triggering them.
- runNextQueuedOperation(repositoryData, repositoryName, true);
- cleanOrphanTimestamp(repositoryName, repositoryData);
- }
-
- @Override
- public void onFailure(Exception e) {
- logger.error("Failed to finalize snapshot repo {} for snapshot-v2 {} ", repositoryName, snapshotName);
+ repository.finalizeSnapshot(
+ shardGenerations,
+ repositoryData.getGenId(),
+ metadataForSnapshot(newState.metadata(), request.includeGlobalState(), false, dataStreams, newEntry.indices()),
+ snapshotInfo,
+ version,
+ state -> stateWithoutSnapshot(state, snapshot),
+ Priority.IMMEDIATE,
+ new ActionListener<RepositoryData>() {
+ @Override
+ public void onResponse(RepositoryData repositoryData) {
+ if (clusterService.state().nodes().isLocalNodeElectedClusterManager() == false) {
 leaveRepoLoop(repositoryName);
- // cleaning up in progress snapshot here
- stateWithoutSnapshotV2(newState);
- listener.onFailure(e);
+ failSnapshotCompletionListeners(
+ snapshot,
+ new SnapshotException(snapshot, "Aborting snapshot-v2, no longer cluster manager")
+ );
+ listener.onFailure(
+ new SnapshotException(repositoryName, snapshotName, "Aborting snapshot-v2, no longer cluster manager")
+ );
+ return;
 }
+ listener.onResponse(snapshotInfo);
+ logger.info("created snapshot-v2 [{}] in repository [{}]", snapshotName, repositoryName);
+ // For snapshot-v2, we don't allow concurrent snapshots . But meanwhile non-v2 snapshot operations
+ // can get queued . This is triggering them.
+ runNextQueuedOperation(repositoryData, repositoryName, true);
+ cleanOrphanTimestamp(repositoryName, repositoryData);
 }
- );
- }, e -> {
- logger.error("Failed to update pinned timestamp for snapshot-v2 {} {} {} ", repositoryName, snapshotName, e);
- leaveRepoLoop(repositoryName);
- // cleaning up in progress snapshot here
- stateWithoutSnapshotV2(newState);
- listener.onFailure(e);
- });
- updateSnapshotPinnedTimestamp(repositoryData, snapshot, pinnedTimestamp, pinnedTimestampListener);
+
+ @Override
+ public void onFailure(Exception e) {
+ logger.error("Failed to finalize snapshot repo {} for snapshot-v2 {} ", repositoryName, snapshotName);
+ leaveRepoLoop(repositoryName);
+ // cleaning up in progress snapshot here
+ stateWithoutSnapshotV2(newState);
+ listener.onFailure(e);
+ }
+ }
+ );
 }

 @Override
@@ -733,30 +722,30 @@ private void createSnapshotPreValidations(
 ensureNoCleanupInProgress(currentState, repositoryName, snapshotName);
 }

- private void updateSnapshotPinnedTimestamp(
- RepositoryData repositoryData,
- Snapshot snapshot,
- long timestampToPin,
- ActionListener<RepositoryData> listener
- ) {
+ private void updateSnapshotPinnedTimestamp(Snapshot snapshot, long timestampToPin) throws Exception {
+ CountDownLatch latch = new CountDownLatch(1);
+ SetOnce<Exception> ex = new SetOnce<>();
+ ActionListener<Void> listener = new ActionListener<>() {
+ @Override
+ public void onResponse(Void unused) {
+ logger.debug("Timestamp pinned successfully for snapshot {}", snapshot.getSnapshotId().getName());
+ }
+
+ @Override
+ public void onFailure(Exception e) {
+ logger.error("Failed to pin timestamp for snapshot {} with exception {}", snapshot.getSnapshotId().getName(), e);
+ ex.set(e);
+ }
+ };
 remoteStorePinnedTimestampService.pinTimestamp(
 timestampToPin,
 getPinningEntity(snapshot.getRepository(), snapshot.getSnapshotId().getUUID()),
- new ActionListener<Void>() {
- @Override
- public void onResponse(Void unused) {
- logger.debug("Timestamp pinned successfully for snapshot {}", snapshot.getSnapshotId().getName());
- listener.onResponse(repositoryData);
- }
-
- @Override
- public void onFailure(Exception e) {
- logger.error("Failed to pin timestamp for snapshot {} with exception {}", snapshot.getSnapshotId().getName(), e);
- listener.onFailure(e);
-
- }
- }
+ new LatchedActionListener<>(listener, latch)
 );
+ latch.await();
+ if (ex.get() != null) {
+ throw ex.get();
+ }
 }

 public static String getPinningEntity(String repositoryName, String snapshotUUID) {

From 7065e6b0bc89028a742e89f30361c60c7c8a9da2 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 14 Oct 2024 10:28:24 -0400
Subject: [PATCH 08/19] Bump ch.qos.logback:logback-core from 1.5.8 to 1.5.10 in /test/fixtures/hdfs-fixture (#16307)

* Bump ch.qos.logback:logback-core in /test/fixtures/hdfs-fixture

Bumps [ch.qos.logback:logback-core](https://github.com/qos-ch/logback) from 1.5.8 to 1.5.10.
- [Commits](https://github.com/qos-ch/logback/compare/v_1.5.8...v_1.5.10)

---
updated-dependencies:
- dependency-name: ch.qos.logback:logback-core
  dependency-type: direct:production
  update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 2 +- test/fixtures/hdfs-fixture/build.gradle | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index bc6c8b024c692..84aae800bb50a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -32,7 +32,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `peter-evans/create-pull-request` from 6 to 7 ([#15863](https://github.com/opensearch-project/OpenSearch/pull/15863)) - Bump `com.nimbusds:oauth2-oidc-sdk` from 11.9.1 to 11.19.1 ([#15862](https://github.com/opensearch-project/OpenSearch/pull/15862)) - Bump `com.microsoft.azure:msal4j` from 1.17.0 to 1.17.1 ([#15945](https://github.com/opensearch-project/OpenSearch/pull/15945)) -- Bump `ch.qos.logback:logback-core` from 1.5.6 to 1.5.8 ([#15946](https://github.com/opensearch-project/OpenSearch/pull/15946)) +- Bump `ch.qos.logback:logback-core` from 1.5.6 to 1.5.10 ([#15946](https://github.com/opensearch-project/OpenSearch/pull/15946), [#16307](https://github.com/opensearch-project/OpenSearch/pull/16307)) - Update protobuf from 3.25.4 to 3.25.5 ([#16011](https://github.com/opensearch-project/OpenSearch/pull/16011)) - Bump `org.roaringbitmap:RoaringBitmap` from 1.2.1 to 1.3.0 ([#16040](https://github.com/opensearch-project/OpenSearch/pull/16040)) - Bump `com.nimbusds:nimbus-jose-jwt` from 9.40 to 9.41.1 ([#16038](https://github.com/opensearch-project/OpenSearch/pull/16038)) diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index b3a90b5de2589..65fb55afbc7bd 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -74,7 +74,7 @@ dependencies { api 'org.apache.zookeeper:zookeeper:3.9.2' api "org.apache.commons:commons-text:1.12.0" api "commons-net:commons-net:3.11.1" - api "ch.qos.logback:logback-core:1.5.8" + api "ch.qos.logback:logback-core:1.5.10" api "ch.qos.logback:logback-classic:1.2.13" api "org.jboss.xnio:xnio-nio:3.8.16.Final" api 'org.jline:jline:3.27.0' From f4bf0dae6ad08655bf68da73deeb3eb55563608c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 14 Oct 2024 10:33:38 -0400 Subject: [PATCH 09/19] Bump me.champeau.gradle.japicmp from 0.4.3 to 0.4.4 in /server (#16309) * Bump me.champeau.gradle.japicmp from 0.4.3 to 0.4.4 in /server Bumps me.champeau.gradle.japicmp from 0.4.3 to 0.4.4. --- updated-dependencies: - dependency-name: me.champeau.gradle.japicmp dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 1 + server/build.gradle | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 84aae800bb50a..efb50db28a4a4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -48,6 +48,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.azure:azure-json` from 1.1.0 to 1.3.0 ([#16217](https://github.com/opensearch-project/OpenSearch/pull/16217)) - Bump `io.grpc:grpc-api` from 1.57.2 to 1.68.0 ([#16213](https://github.com/opensearch-project/OpenSearch/pull/16213)) - Bump `com.squareup.okio:okio` from 3.9.0 to 3.9.1 ([#16212](https://github.com/opensearch-project/OpenSearch/pull/16212)) +- Bump `me.champeau.gradle.japicmp` from 0.4.3 to 0.4.4 ([#16309](https://github.com/opensearch-project/OpenSearch/pull/16309)) ### Changed - Add support for docker compose v2 in TestFixturesPlugin ([#16049](https://github.com/opensearch-project/OpenSearch/pull/16049)) diff --git a/server/build.gradle b/server/build.gradle index eccaf8a127647..c19e171c90f96 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -36,7 +36,7 @@ plugins { id('opensearch.publish') id('opensearch.internal-cluster-test') id('opensearch.optional-dependencies') - id('me.champeau.gradle.japicmp') version '0.4.3' + id('me.champeau.gradle.japicmp') version '0.4.4' } publishing { From 0ff0439dce988344c76ec0d68643bef528c652b6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 14 Oct 2024 11:41:55 -0400 Subject: [PATCH 10/19] Bump com.google.oauth-client:google-oauth-client from 1.35.0 to 1.36.0 in /plugins/discovery-gce (#16306) * Bump com.google.oauth-client:google-oauth-client Bumps [com.google.oauth-client:google-oauth-client](https://github.com/googleapis/google-oauth-java-client) from 1.35.0 to 1.36.0. - [Release notes](https://github.com/googleapis/google-oauth-java-client/releases) - [Changelog](https://github.com/googleapis/google-oauth-java-client/blob/main/CHANGELOG.md) - [Commits](https://github.com/googleapis/google-oauth-java-client/compare/v1.35.0...v1.36.0) --- updated-dependencies: - dependency-name: com.google.oauth-client:google-oauth-client dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 1 + plugins/discovery-gce/build.gradle | 2 +- .../discovery-gce/licenses/google-oauth-client-1.35.0.jar.sha1 | 1 - .../discovery-gce/licenses/google-oauth-client-1.36.0.jar.sha1 | 1 + 4 files changed, 3 insertions(+), 2 deletions(-) delete mode 100644 plugins/discovery-gce/licenses/google-oauth-client-1.35.0.jar.sha1 create mode 100644 plugins/discovery-gce/licenses/google-oauth-client-1.36.0.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index efb50db28a4a4..a99c012bfc6d2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -49,6 +49,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `io.grpc:grpc-api` from 1.57.2 to 1.68.0 ([#16213](https://github.com/opensearch-project/OpenSearch/pull/16213)) - Bump `com.squareup.okio:okio` from 3.9.0 to 3.9.1 ([#16212](https://github.com/opensearch-project/OpenSearch/pull/16212)) - Bump `me.champeau.gradle.japicmp` from 0.4.3 to 0.4.4 ([#16309](https://github.com/opensearch-project/OpenSearch/pull/16309)) +- Bump `com.google.oauth-client:google-oauth-client` from 1.35.0 to 1.36.0 ([#16306](https://github.com/opensearch-project/OpenSearch/pull/16306)) ### Changed - Add support for docker compose v2 in TestFixturesPlugin ([#16049](https://github.com/opensearch-project/OpenSearch/pull/16049)) diff --git a/plugins/discovery-gce/build.gradle b/plugins/discovery-gce/build.gradle index a08fa1d968e30..76beb78bf533c 100644 --- a/plugins/discovery-gce/build.gradle +++ b/plugins/discovery-gce/build.gradle @@ -20,7 +20,7 @@ opensearchplugin { dependencies { api "com.google.apis:google-api-services-compute:v1-rev20240407-2.0.0" api "com.google.api-client:google-api-client:1.35.2" - api "com.google.oauth-client:google-oauth-client:1.35.0" + api "com.google.oauth-client:google-oauth-client:1.36.0" api "com.google.http-client:google-http-client:${versions.google_http_client}" api "com.google.http-client:google-http-client-gson:${versions.google_http_client}" api "com.google.http-client:google-http-client-jackson2:${versions.google_http_client}" diff --git a/plugins/discovery-gce/licenses/google-oauth-client-1.35.0.jar.sha1 b/plugins/discovery-gce/licenses/google-oauth-client-1.35.0.jar.sha1 deleted file mode 100644 index a52e79088c7ca..0000000000000 --- a/plugins/discovery-gce/licenses/google-oauth-client-1.35.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2f52003156e40ba8be5f349a2716a77428896e69 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/google-oauth-client-1.36.0.jar.sha1 b/plugins/discovery-gce/licenses/google-oauth-client-1.36.0.jar.sha1 new file mode 100644 index 0000000000000..25aa7d76f153a --- /dev/null +++ b/plugins/discovery-gce/licenses/google-oauth-client-1.36.0.jar.sha1 @@ -0,0 +1 @@ +dc3f07bc8f49dd52fe8fcc15958f3cfeb003e20f \ No newline at end of file From 32c1a4370ce394fa9ae50835de626a24e1e81b2c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 14 Oct 2024 11:19:13 -0500 Subject: [PATCH 11/19] Bump lycheeverse/lychee-action from 1.10.0 to 2.0.2 (#16310) * Bump lycheeverse/lychee-action from 1.10.0 to 2.0.2 Bumps [lycheeverse/lychee-action](https://github.com/lycheeverse/lychee-action) from 1.10.0 to 2.0.2. 
- [Release notes](https://github.com/lycheeverse/lychee-action/releases) - [Commits](https://github.com/lycheeverse/lychee-action/compare/v1.10.0...v2.0.2) --- updated-dependencies: - dependency-name: lycheeverse/lychee-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- .github/workflows/links.yml | 2 +- CHANGELOG.md | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/links.yml b/.github/workflows/links.yml index 8f628fcd78148..cadbe71bb6ea8 100644 --- a/.github/workflows/links.yml +++ b/.github/workflows/links.yml @@ -13,7 +13,7 @@ jobs: - uses: actions/checkout@v4 - name: lychee Link Checker id: lychee - uses: lycheeverse/lychee-action@v1.10.0 + uses: lycheeverse/lychee-action@v2.0.2 with: args: --accept=200,403,429 --exclude-mail **/*.html **/*.md **/*.txt **/*.json --exclude-file .lychee.excludes fail: true diff --git a/CHANGELOG.md b/CHANGELOG.md index a99c012bfc6d2..1523d0496dace 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -50,6 +50,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.squareup.okio:okio` from 3.9.0 to 3.9.1 ([#16212](https://github.com/opensearch-project/OpenSearch/pull/16212)) - Bump `me.champeau.gradle.japicmp` from 0.4.3 to 0.4.4 ([#16309](https://github.com/opensearch-project/OpenSearch/pull/16309)) - Bump `com.google.oauth-client:google-oauth-client` from 1.35.0 to 1.36.0 ([#16306](https://github.com/opensearch-project/OpenSearch/pull/16306)) +- Bump `lycheeverse/lychee-action` from 1.10.0 to 2.0.2 ([#16310](https://github.com/opensearch-project/OpenSearch/pull/16310)) ### Changed - Add support for docker compose v2 in TestFixturesPlugin ([#16049](https://github.com/opensearch-project/OpenSearch/pull/16049)) From 931339e38be8f29281501a5ac8f0dddf2aa2232d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 14 Oct 2024 13:28:05 -0400 Subject: [PATCH 12/19] Bump com.azure:azure-core-http-netty from 1.15.4 to 1.15.5 in /plugins/repository-azure (#16311) * Bump com.azure:azure-core-http-netty in /plugins/repository-azure Bumps [com.azure:azure-core-http-netty](https://github.com/Azure/azure-sdk-for-java) from 1.15.4 to 1.15.5. - [Release notes](https://github.com/Azure/azure-sdk-for-java/releases) - [Commits](https://github.com/Azure/azure-sdk-for-java/compare/azure-core-http-netty_1.15.4...azure-core-http-netty_1.15.5) --- updated-dependencies: - dependency-name: com.azure:azure-core-http-netty dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 2 +- plugins/repository-azure/build.gradle | 2 +- .../licenses/azure-core-http-netty-1.15.4.jar.sha1 | 1 - .../licenses/azure-core-http-netty-1.15.5.jar.sha1 | 1 + 4 files changed, 3 insertions(+), 3 deletions(-) delete mode 100644 plugins/repository-azure/licenses/azure-core-http-netty-1.15.4.jar.sha1 create mode 100644 plugins/repository-azure/licenses/azure-core-http-netty-1.15.5.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 1523d0496dace..4c279b27a156b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -41,7 +41,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.maxmind.geoip2:geoip2` from 4.2.0 to 4.2.1 ([#16042](https://github.com/opensearch-project/OpenSearch/pull/16042)) - Bump `com.maxmind.db:maxmind-db` from 3.1.0 to 3.1.1 ([#16137](https://github.com/opensearch-project/OpenSearch/pull/16137)) - Bump Apache lucene from 9.11.1 to 9.12.0 ([#15333](https://github.com/opensearch-project/OpenSearch/pull/15333)) -- Bump `com.azure:azure-core-http-netty` from 1.15.3 to 1.15.4 ([#16133](https://github.com/opensearch-project/OpenSearch/pull/16133)) +- Bump `com.azure:azure-core-http-netty` from 1.15.3 to 1.15.5 ([#16133](https://github.com/opensearch-project/OpenSearch/pull/16133), [#16311](https://github.com/opensearch-project/OpenSearch/pull/16311)) - Bump `org.jline:jline` from 3.26.3 to 3.27.0 ([#16135](https://github.com/opensearch-project/OpenSearch/pull/16135)) - Bump `netty` from 4.1.112.Final to 4.1.114.Final ([#16182](https://github.com/opensearch-project/OpenSearch/pull/16182)) - Bump `com.google.api-client:google-api-client` from 2.2.0 to 2.7.0 ([#16216](https://github.com/opensearch-project/OpenSearch/pull/16216)) diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index e2d1c15cdb36f..d7eebe70ec303 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -48,7 +48,7 @@ dependencies { api 'com.azure:azure-json:1.3.0' api 'com.azure:azure-xml:1.1.0' api 'com.azure:azure-storage-common:12.25.1' - api 'com.azure:azure-core-http-netty:1.15.4' + api 'com.azure:azure-core-http-netty:1.15.5' api "io.netty:netty-codec-dns:${versions.netty}" api "io.netty:netty-codec-socks:${versions.netty}" api "io.netty:netty-codec-http2:${versions.netty}" diff --git a/plugins/repository-azure/licenses/azure-core-http-netty-1.15.4.jar.sha1 b/plugins/repository-azure/licenses/azure-core-http-netty-1.15.4.jar.sha1 deleted file mode 100644 index 97e6fad264294..0000000000000 --- a/plugins/repository-azure/licenses/azure-core-http-netty-1.15.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -489a38c9e6efb5ce01fbd276d8cb6c0e89000459 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-core-http-netty-1.15.5.jar.sha1 b/plugins/repository-azure/licenses/azure-core-http-netty-1.15.5.jar.sha1 new file mode 100644 index 0000000000000..2f5239cc26148 --- /dev/null +++ b/plugins/repository-azure/licenses/azure-core-http-netty-1.15.5.jar.sha1 @@ -0,0 +1 @@ +44d99705d3759e2ad7ee8110f811d4ed304a6a7c \ No newline at end of file From 783d3e1850e156161cb64519c2ab471290c3791a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" 
<49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 14 Oct 2024 13:31:58 -0400 Subject: [PATCH 13/19] Bump com.google.code.gson:gson from 2.10.1 to 2.11.0 in /plugins/repository-gcs (#16308) * Bump com.google.code.gson:gson in /plugins/repository-gcs Bumps [com.google.code.gson:gson](https://github.com/google/gson) from 2.10.1 to 2.11.0. - [Release notes](https://github.com/google/gson/releases) - [Changelog](https://github.com/google/gson/blob/main/CHANGELOG.md) - [Commits](https://github.com/google/gson/compare/gson-parent-2.10.1...gson-parent-2.11.0) --- updated-dependencies: - dependency-name: com.google.code.gson:gson dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 1 + plugins/repository-gcs/build.gradle | 2 +- plugins/repository-gcs/licenses/gson-2.10.1.jar.sha1 | 1 - plugins/repository-gcs/licenses/gson-2.11.0.jar.sha1 | 1 + 4 files changed, 3 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-gcs/licenses/gson-2.10.1.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/gson-2.11.0.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 4c279b27a156b..e60c078f5ed3a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -51,6 +51,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `me.champeau.gradle.japicmp` from 0.4.3 to 0.4.4 ([#16309](https://github.com/opensearch-project/OpenSearch/pull/16309)) - Bump `com.google.oauth-client:google-oauth-client` from 1.35.0 to 1.36.0 ([#16306](https://github.com/opensearch-project/OpenSearch/pull/16306)) - Bump `lycheeverse/lychee-action` from 1.10.0 to 2.0.2 ([#16310](https://github.com/opensearch-project/OpenSearch/pull/16310)) +- Bump `com.google.code.gson:gson` from 2.10.1 to 2.11.0 ([#16308](https://github.com/opensearch-project/OpenSearch/pull/16308)) ### Changed - Add support for docker compose v2 in TestFixturesPlugin ([#16049](https://github.com/opensearch-project/OpenSearch/pull/16049)) diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index ab129ab7f116a..b90bcc7f822d1 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -70,7 +70,7 @@ dependencies { api 'com.google.cloud:google-cloud-core-http:2.23.0' api 'com.google.cloud:google-cloud-storage:1.113.1' - api 'com.google.code.gson:gson:2.10.1' + api 'com.google.code.gson:gson:2.11.0' runtimeOnly "com.google.guava:guava:${versions.guava}" api 'com.google.guava:failureaccess:1.0.1' diff --git a/plugins/repository-gcs/licenses/gson-2.10.1.jar.sha1 b/plugins/repository-gcs/licenses/gson-2.10.1.jar.sha1 deleted file mode 100644 index 9810309d1013a..0000000000000 --- a/plugins/repository-gcs/licenses/gson-2.10.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b3add478d4382b78ea20b1671390a858002feb6c \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/gson-2.11.0.jar.sha1 b/plugins/repository-gcs/licenses/gson-2.11.0.jar.sha1 new file mode 100644 index 0000000000000..0414a49526895 --- /dev/null +++ b/plugins/repository-gcs/licenses/gson-2.11.0.jar.sha1 @@ -0,0 +1 @@ +527175ca6d81050b53bdd4c457a6d6e017626b0e \ No newline at end of file From 88d13eb5bd3bbea3a04b7b0c7576ca00d0f8435c Mon Sep 17 00:00:00 2001 
From: David Zane <38449481+dzane17@users.noreply.github.com> Date: Mon, 14 Oct 2024 11:29:57 -0700 Subject: [PATCH 14/19] Enable coordinator search.request_stats_enabled by default (#16290) Signed-off-by: David Zane Signed-off-by: Ankit Jain Co-authored-by: Ankit Jain --- CHANGELOG.md | 1 + .../java/org/opensearch/action/search/SearchRequestStats.java | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e60c078f5ed3a..00664189ced1c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -58,6 +58,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Remove identity-related feature flagged code from the RestController ([#15430](https://github.com/opensearch-project/OpenSearch/pull/15430)) - Remove Identity FeatureFlag ([#16024](https://github.com/opensearch-project/OpenSearch/pull/16024)) - Ensure RestHandler.Wrapper delegates all implementations to the wrapped handler ([#16154](https://github.com/opensearch-project/OpenSearch/pull/16154)) +- Enable coordinator search.request_stats_enabled by default ([#16290](https://github.com/opensearch-project/OpenSearch/pull/16290)) - Code cleanup: Remove ApproximateIndexOrDocValuesQuery ([#16273](https://github.com/opensearch-project/OpenSearch/pull/16273)) - Optimise clone operation for incremental full cluster snapshots ([#16296](https://github.com/opensearch-project/OpenSearch/pull/16296)) diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java b/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java index d1d5f568fc09d..94200d29a4f21 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java @@ -32,7 +32,7 @@ public final class SearchRequestStats extends SearchRequestOperationsListener { public static final String SEARCH_REQUEST_STATS_ENABLED_KEY = "search.request_stats_enabled"; public static final Setting SEARCH_REQUEST_STATS_ENABLED = Setting.boolSetting( SEARCH_REQUEST_STATS_ENABLED_KEY, - false, + true, Setting.Property.Dynamic, Setting.Property.NodeScope ); From 9ddee61b1b4eafebe9b4d30e997b40178c939a5e Mon Sep 17 00:00:00 2001 From: kkewwei Date: Tue, 15 Oct 2024 04:54:01 +0800 Subject: [PATCH 15/19] Flat object field should delegate to keyword field for most query types (#14383) Signed-off-by: kkewwei --- CHANGELOG.md | 1 + .../test/index/90_flat_object.yml | 32 - .../92_flat_object_support_doc_values.yml | 788 +++++++++++++ .../xcontent/JsonToStringXContentParser.java | 8 +- .../index/mapper/FlatObjectFieldMapper.java | 310 +++-- .../index/mapper/KeywordFieldMapper.java | 62 +- .../mapper/FlatObjectFieldMapperTests.java | 45 +- .../mapper/FlatObjectFieldTypeTests.java | 1002 ++++++++++++++++- 8 files changed, 1990 insertions(+), 258 deletions(-) create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/index/92_flat_object_support_doc_values.yml diff --git a/CHANGELOG.md b/CHANGELOG.md index 00664189ced1c..9665133e91207 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,6 +24,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - New `phone` & `phone-search` analyzer + tokenizer ([#15915](https://github.com/opensearch-project/OpenSearch/pull/15915)) - Add _list/shards API as paginated alternate to _cat/shards ([#14641](https://github.com/opensearch-project/OpenSearch/pull/14641)) - Latency and Memory allocation improvements to Multi Term Aggregation queries 
([#14993](https://github.com/opensearch-project/OpenSearch/pull/14993)) +- Flat object field use IndexOrDocValuesQuery to optimize query ([#14383](https://github.com/opensearch-project/OpenSearch/issues/14383)) ### Dependencies - Bump `com.azure:azure-identity` from 1.13.0 to 1.13.2 ([#15578](https://github.com/opensearch-project/OpenSearch/pull/15578)) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/90_flat_object.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/90_flat_object.yml index e8da81d7bee41..2a469aa5ff04d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/90_flat_object.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/90_flat_object.yml @@ -671,38 +671,6 @@ teardown: - match: { error.root_cause.0.reason: "Mapping definition for [data] has unsupported parameters: [analyzer : standard]"} - match: { status: 400 } - # Wildcard Query with dot path. - - do: - catch: bad_request - search: - body: { - _source: true, - query: { - "wildcard": { - "catalog.title": "Mock*" - } - } - } - - match: { error.root_cause.0.type: "query_shard_exception" } - - match: { error.root_cause.0.reason: "Can only use wildcard queries on keyword and text fields - not on [catalog.title] which is of type [flat_object]"} - - match: { status: 400 } - - # Wildcard Query without dot path. - - do: - catch: bad_request - search: - body: { - _source: true, - query: { - "wildcard": { - "catalog": "Mock*" - } - } - } - - match: { error.root_cause.0.type: "query_shard_exception" } - - match: { error.root_cause.0.reason: "Can only use wildcard queries on keyword and text fields - not on [catalog] which is of type [flat_object]" } - - match: { status: 400 } - # Aggregation and Match Query with dot path. - do: catch: bad_request diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/92_flat_object_support_doc_values.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/92_flat_object_support_doc_values.yml new file mode 100644 index 0000000000000..9ec39660a4928 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/92_flat_object_support_doc_values.yml @@ -0,0 +1,788 @@ +--- +# The test setup includes: +# - Create flat_object mapping for flat_object_doc_values_test index +# - Index 9 example documents +# - Search tests about doc_values and index + +setup: + - skip: + version: " - 2.99.99" + reason: "introduced in 3.0.0 " + + - do: + indices.create: + index: flat_object_doc_values_test + body: + mappings: + properties: + issue: + properties: + labels: + type: "flat_object" + order: + type: "keyword" + + - do: + bulk: + refresh: true + body: | + {"index":{"_index":"flat_object_doc_values_test","_id":"0"}} + {"order":"order0","issue":{"labels":{"number":1,"name":"abc0","status":1}}} + {"index":{"_index":"flat_object_doc_values_test","_id":"1"}} + {"order":"order1","issue":{"labels":{"number":2,"name":"abc1","status":1}}} + {"index":{"_index":"flat_object_doc_values_test","_id":"2"}} + {"order":"order2","issue":{"labels":{"number":2,"name":"abc2","status":1}}} + {"index":{"_index":"flat_object_doc_values_test","_id":"3"}} + {"order":"order3","issue":{"labels":{"number":3,"name":"abc3","status":1}}} + {"index":{"_index":"flat_object_doc_values_test","_id":"4"}} + {"order":"order4","issue":{"labels":{"number":4,"name":"abc4","status":1}}} + {"index":{"_index":"flat_object_doc_values_test","_id":"5"}} + {"order":"order5","issue":{"labels":{"number":5,"name":"abc5","status":1}}} + 
{"index":{"_index":"flat_object_doc_values_test","_id":"6"}} + {"order":"order6","issue":{"labels":{"number":6,"name":"abc6","status":1}}} + {"index":{"_index":"flat_object_doc_values_test","_id":"7"}} + {"order":"order7","issue":{"labels":{"number":7,"name":"abc7","status":1}}} + {"index":{"_index":"flat_object_doc_values_test","_id":"8"}} + {"order":"order8","issue":{"labels":{"number":8,"name":"abc8","status":1}}} + +--- +# Delete Index when connection is teardown +teardown: + - do: + indices.delete: + index: flat_object_doc_values_test + +--- +"Supported queries": + - skip: + version: " - 2.99.99" + reason: "introduced in 3.0.0 " + + # Verify Document Count + - do: + search: + body: { + query: { + match_all: { } + } + } + + - length: { hits.hits: 9 } + + # Term Query with exact dot path. + - do: + search: + body: { + _source: true, + query: { + bool: { + must: [ + { + term: { + order: "order8" + } + }, + { + term: { + issue.labels.status: 1 + } + } + ] + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.order: "order8" } + + - do: + search: + body: { + _source: true, + query: { + bool: { + must: [ + { + term: { + issue.labels.name: "abc8" + } + } + ] + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.order: "order8" } + + # Term Query with no path. + - do: + search: + body: { + _source: true, + query: { + bool: { + must: [ + { + term: { + order: "order8" + } + }, + { + term: { + issue.labels: 1 + } + } + ] + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.order: "order8" } + + - do: + search: + body: { + _source: true, + query: { + bool: { + must: [ + { + term: { + issue.labels: "abc8" + } + } + ] + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.order: "order8" } + + # Terms Query with exact dot path. + - do: + search: + body: { + _source: true, + query: { + bool: { + must: [ + { + term: { + order: "order8" + } + }, + { + terms: { + issue.labels.status: [0,1] + } + } + ] + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.order: "order8" } + + - do: + search: + body: { + _source: true, + query: { + bool: { + must: [ + { + terms: { + issue.labels.name: ["abc8"] + } + } + ] + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.order: "order8" } + + # Terms Query with no path. + - do: + search: + body: { + _source: true, + query: { + bool: { + must: [ + { + term: { + order: "order8" + } + }, + { + terms: { + issue.labels: [ 0,1 ] + } + } + ] + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.order: "order8" } + + - do: + search: + body: { + _source: true, + query: { + bool: { + must: [ + { + terms: { + issue.labels.name: ["abc8"] + } + } + ] + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.order: "order8" } + + # Prefix Query with exact dot path. + - do: + search: + body: { + _source: true, + query: { + bool: { + must: [ + { + term: { + order: "order8" + } + }, + { + prefix: { + issue.labels.name: "ab" + } + } + ] + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.order: "order8" } + + - do: + search: + body: { + _source: true, + query: { + bool: { + must: [ + { + prefix: { + issue.labels.name: "abc8" + } + } + ] + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.order: "order8" } + + # Prefix Query with no path. 
+ - do: + search: + body: { + _source: true, + query: { + bool: { + must: [ + { + term: { + order: "order8" + } + }, + { + prefix: { + issue.labels: "ab" + } + } + ] + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.order: "order8" } + + - do: + search: + body: { + _source: true, + query: { + bool: { + must: [ + { + prefix: { + issue.labels: "abc8" + } + } + ] + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.order: "order8" } + + # Regexp Query with exact dot path. + - do: + search: + body: { + _source: true, + query: { + bool: { + must: [ + { + term: { + order: "order8" + } + }, + { + regexp: { + issue.labels.name: "ab.*" + } + } + ] + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.order: "order8" } + - match: { hits.hits.0._source.issue.labels.name: "abc8" } + + - do: + search: + body: { + _source: true, + query: { + bool: { + must: [ + { + regexp: { + issue.labels.name: "a.*c8" + } + } + ] + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.order: "order8" } + + # Regexp Query with no path. + - do: + search: + body: { + _source: true, + query: { + bool: { + must: [ + { + term: { + order: "order8" + } + }, + { + regexp: { + issue.labels: "ab.*" + } + } + ] + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.order: "order8" } + + - do: + search: + body: { + _source: true, + query: { + bool: { + must: [ + { + regexp: { + issue.labels: "a.*c8" + } + } + ] + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.order: "order8" } + + # Fuzzy Query with exact dot path. + - do: + search: + body: { + _source: true, + query: { + bool: { + must: [ + { + term: { + order: "order8" + } + }, + { + fuzzy: { + issue.labels.name: { + value: "abcx", + fuzziness: 1 + } + } + } + ] + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.order: "order8" } + + - do: + search: + body: { + _source: true, + query: { + bool: { + must: [ + { + fuzzy: { + issue.labels.name: { + value: "abc8", + fuzziness: 0 + } + } + } + ] + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.order: "order8" } + + # Fuzzy Query with no path. + - do: + search: + body: { + _source: true, + query: { + bool: { + must: [ + { + term: { + order: "order8" + } + }, + { + fuzzy: { + issue.labels: { + value: "abcx", + fuzziness: 1 + } + } + } + ] + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.order: "order8" } + + - do: + search: + body: { + _source: true, + query: { + bool: { + must: [ + { + fuzzy: { + issue.labels: { + value: "abc8", + fuzziness: 0 + } + } + } + ] + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.order: "order8" } + + # Range Query with exact dot path. + - do: + search: + body: { + _source: true, + query: { + bool: { + must: [ + { + term: { + order: "order8" + } + }, + { + range: { + issue.labels.status: { + from: 0 + } + } + } + ] + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.order: "order8" } + + - do: + search: + body: { + _source: true, + query: { + bool: { + must: [ + { + range: { + issue.labels.name: { + from: "abc8" + } + } + } + ] + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.order: "order8" } + + # Range Query with no path. 
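+ # Editorial note (hedged, not part of the original patch): the range and exists
+ # checks that follow exercise the keyword delegation added in FlatObjectFieldMapper;
+ # with doc_values enabled, existsQuery on the root flat_object field is expected to
+ # return a FieldExistsQuery instead of a term query on _field_names.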
+ - do: + search: + body: { + _source: true, + query: { + bool: { + must: [ + { + term: { + order: "order8" + } + }, + { + range: { + issue.labels: { + from: 0 + } + } + } + ] + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.order: "order8" } + + - do: + search: + body: { + _source: true, + query: { + bool: { + must: [ + { + range: { + issue.labels: { + from: "abc8" + } + } + } + ] + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.order: "order8" } + + # Exists Query with exact dot path. + - do: + search: + body: { + _source: true, + query: { + bool: { + must: [ + { + term: { + order: "order8" + } + }, + { + exists: { + field: "issue.labels.status" + } + } + ] + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.order: "order8" } + + # Exists Query with no path. + - do: + search: + body: { + _source: true, + query: { + bool: { + must: [ + { + term: { + order: "order8" + } + }, + { + exists: { + field: "issue.labels" + } + } + ] + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.order: "order8" } + + + # Wildcard Query with exact dot path. + - do: + search: + body: { + _source: true, + query: { + bool: { + must: [ + { + term: { + order: "order8" + } + }, + { + wildcard: { + issue.labels.name: "abc*" + } + } + ] + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.order: "order8" } + + - do: + search: + body: { + _source: true, + query: { + bool: { + must: [ + { + wildcard: { + issue.labels.name: "abc8*" + } + } + ] + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.order: "order8" } + + # Wildcard Query with no path. + - do: + search: + body: { + _source: true, + query: { + bool: { + must: [ + { + term: { + order: "order8" + } + }, + { + wildcard: { + issue.labels: "abc*" + } + } + ] + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.order: "order8" } + + - do: + search: + body: { + _source: true, + query: { + bool: { + must: [ + { + wildcard: { + issue.labels: "abc8*" + } + } + ] + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.order: "order8" } diff --git a/server/src/main/java/org/opensearch/common/xcontent/JsonToStringXContentParser.java b/server/src/main/java/org/opensearch/common/xcontent/JsonToStringXContentParser.java index 95a8d9c9495f2..21270b4241b15 100644 --- a/server/src/main/java/org/opensearch/common/xcontent/JsonToStringXContentParser.java +++ b/server/src/main/java/org/opensearch/common/xcontent/JsonToStringXContentParser.java @@ -37,6 +37,10 @@ * @opensearch.internal */ public class JsonToStringXContentParser extends AbstractXContentParser { + public static final String VALUE_AND_PATH_SUFFIX = "._valueAndPath"; + public static final String VALUE_SUFFIX = "._value"; + public static final String DOT_SYMBOL = "."; + public static final String EQUAL_SYMBOL = "="; private final String fieldTypeName; private final XContentParser parser; @@ -50,10 +54,6 @@ public class JsonToStringXContentParser extends AbstractXContentParser { private final DeprecationHandler deprecationHandler; - private static final String VALUE_AND_PATH_SUFFIX = "._valueAndPath"; - private static final String VALUE_SUFFIX = "._value"; - private static final String EQUAL_SYMBOL = "="; - public JsonToStringXContentParser( NamedXContentRegistry xContentRegistry, DeprecationHandler deprecationHandler, diff --git a/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java 
b/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java index 738efcfafdca1..0ccdb40f9d33a 100644 --- a/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java @@ -16,19 +16,16 @@ import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.Term; import org.apache.lucene.search.BoostQuery; +import org.apache.lucene.search.FieldExistsQuery; import org.apache.lucene.search.MultiTermQuery; -import org.apache.lucene.search.PrefixQuery; import org.apache.lucene.search.Query; -import org.apache.lucene.search.TermInSetQuery; import org.apache.lucene.search.TermQuery; -import org.apache.lucene.search.TermRangeQuery; import org.apache.lucene.util.BytesRef; -import org.opensearch.OpenSearchException; import org.opensearch.Version; import org.opensearch.common.Nullable; import org.opensearch.common.collect.Iterators; import org.opensearch.common.lucene.Lucene; -import org.opensearch.common.lucene.search.AutomatonQueries; +import org.opensearch.common.unit.Fuzziness; import org.opensearch.common.xcontent.JsonToStringXContentParser; import org.opensearch.core.common.ParsingException; import org.opensearch.core.xcontent.DeprecationHandler; @@ -37,8 +34,8 @@ import org.opensearch.index.analysis.NamedAnalyzer; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.fielddata.plain.SortedSetOrdinalsIndexFieldData; +import org.opensearch.index.mapper.KeywordFieldMapper.KeywordFieldType; import org.opensearch.index.query.QueryShardContext; -import org.opensearch.index.query.QueryShardException; import org.opensearch.search.aggregations.support.CoreValuesSourceType; import org.opensearch.search.lookup.SearchLookup; @@ -52,7 +49,11 @@ import java.util.function.BiFunction; import java.util.function.Supplier; -import static org.opensearch.search.SearchService.ALLOW_EXPENSIVE_QUERIES; +import static org.opensearch.common.xcontent.JsonToStringXContentParser.DOT_SYMBOL; +import static org.opensearch.common.xcontent.JsonToStringXContentParser.EQUAL_SYMBOL; +import static org.opensearch.common.xcontent.JsonToStringXContentParser.VALUE_AND_PATH_SUFFIX; +import static org.opensearch.common.xcontent.JsonToStringXContentParser.VALUE_SUFFIX; +import static org.opensearch.index.mapper.FlatObjectFieldMapper.FlatObjectFieldType.getKeywordFieldType; /** * A field mapper for flat_objects. 
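 * Editorial note (illustrative sketch, not part of the original patch): a
 * flat_object field named {@code field} is materialized as up to three
 * keyword-like Lucene fields, {@code field}, {@code field._value} and
 * {@code field._valueAndPath}, each of which may also carry SortedSet doc
 * values. A keyed lookup on a dotted sub-path is rewritten before being
 * delegated to the keyword subfield, roughly:
 * <pre>
 *   // hypothetical values, following directSubfield() / rewriteValue():
 *   String directSubfield = "field" + "._valueAndPath"; // keyed lookups go here
 *   String rewritten      = "field.bar" + "=" + "foo";  // "path=value" form
 *   // the doc-values side additionally prefixes the field name, e.g.
 *   // "field.foo" for a root-level value "foo" (see rewriteForDocValue()).
 * </pre>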
@@ -62,10 +63,6 @@ public final class FlatObjectFieldMapper extends DynamicKeyFieldMapper { public static final String CONTENT_TYPE = "flat_object"; - private static final String VALUE_AND_PATH_SUFFIX = "._valueAndPath"; - private static final String VALUE_SUFFIX = "._value"; - private static final String DOT_SYMBOL = "."; - private static final String EQUAL_SYMBOL = "="; /** * In flat_object field mapper, field type is similar to keyword field type @@ -86,7 +83,12 @@ public static class Defaults { @Override public MappedFieldType keyedFieldType(String key) { - return new FlatObjectFieldType(this.name() + DOT_SYMBOL + key, this.name()); + return new FlatObjectFieldType( + this.name() + DOT_SYMBOL + key, + this.name(), + (KeywordFieldType) valueFieldMapper.fieldType(), + (KeywordFieldType) valueAndPathFieldMapper.fieldType() + ); } /** @@ -111,20 +113,12 @@ public Builder(String name) { builder = this; } - private FlatObjectFieldType buildFlatObjectFieldType(BuilderContext context, FieldType fieldType) { - return new FlatObjectFieldType(buildFullName(context), fieldType); - } - /** * ValueFieldMapper is the subfield type for values in the Json. * use a {@link KeywordFieldMapper.KeywordField} */ - private ValueFieldMapper buildValueFieldMapper(BuilderContext context, FieldType fieldType, FlatObjectFieldType fft) { - String fullName = buildFullName(context); + private ValueFieldMapper buildValueFieldMapper(FieldType fieldType, KeywordFieldType valueFieldType) { FieldType vft = new FieldType(fieldType); - KeywordFieldMapper.KeywordFieldType valueFieldType = new KeywordFieldMapper.KeywordFieldType(fullName + VALUE_SUFFIX, vft); - - fft.setValueFieldType(valueFieldType); return new ValueFieldMapper(vft, valueFieldType); } @@ -132,27 +126,30 @@ private ValueFieldMapper buildValueFieldMapper(BuilderContext context, FieldType * ValueAndPathFieldMapper is the subfield type for path=value format in the Json. 
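 * (i.e. it indexes entries of the form {@code path=value}, such as
 * {@code field.bar=foo}, under the {@code ._valueAndPath} subfield);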
* also use a {@link KeywordFieldMapper.KeywordField} */ - private ValueAndPathFieldMapper buildValueAndPathFieldMapper(BuilderContext context, FieldType fieldType, FlatObjectFieldType fft) { - String fullName = buildFullName(context); + private ValueAndPathFieldMapper buildValueAndPathFieldMapper(FieldType fieldType, KeywordFieldType valueAndPathFieldType) { FieldType vft = new FieldType(fieldType); - KeywordFieldMapper.KeywordFieldType ValueAndPathFieldType = new KeywordFieldMapper.KeywordFieldType( - fullName + VALUE_AND_PATH_SUFFIX, - vft - ); - fft.setValueAndPathFieldType(ValueAndPathFieldType); - return new ValueAndPathFieldMapper(vft, ValueAndPathFieldType); + return new ValueAndPathFieldMapper(vft, valueAndPathFieldType); } @Override public FlatObjectFieldMapper build(BuilderContext context) { - FieldType fieldtype = new FieldType(Defaults.FIELD_TYPE); - FlatObjectFieldType fft = buildFlatObjectFieldType(context, fieldtype); + boolean isSearchable = true; + boolean hasDocValue = true; + KeywordFieldType valueFieldType = getKeywordFieldType(buildFullName(context), VALUE_SUFFIX, isSearchable, hasDocValue); + KeywordFieldType valueAndPathFieldType = getKeywordFieldType( + buildFullName(context), + VALUE_AND_PATH_SUFFIX, + isSearchable, + hasDocValue + ); + FlatObjectFieldType fft = new FlatObjectFieldType(buildFullName(context), null, valueFieldType, valueAndPathFieldType); + return new FlatObjectFieldMapper( name, Defaults.FIELD_TYPE, fft, - buildValueFieldMapper(context, fieldtype, fft), - buildValueAndPathFieldMapper(context, fieldtype, fft), + buildValueFieldMapper(Defaults.FIELD_TYPE, valueFieldType), + buildValueAndPathFieldMapper(Defaults.FIELD_TYPE, valueAndPathFieldType), CopyTo.empty(), this ); @@ -189,66 +186,70 @@ public static final class FlatObjectFieldType extends StringFieldType { private final String mappedFieldTypeName; - private KeywordFieldMapper.KeywordFieldType valueFieldType; + private final KeywordFieldType valueFieldType; - private KeywordFieldMapper.KeywordFieldType valueAndPathFieldType; - - public FlatObjectFieldType(String name, boolean isSearchable, boolean hasDocValues, Map meta) { - super(name, isSearchable, false, true, TextSearchInfo.SIMPLE_MATCH_ONLY, meta); - setIndexAnalyzer(Lucene.KEYWORD_ANALYZER); - this.ignoreAbove = Integer.MAX_VALUE; - this.nullValue = null; - this.mappedFieldTypeName = null; - } + private final KeywordFieldType valueAndPathFieldType; - public FlatObjectFieldType(String name, FieldType fieldType) { + public FlatObjectFieldType( + String name, + String mappedFieldTypeName, + boolean isSearchable, + boolean hasDocValues, + NamedAnalyzer analyzer, + Map meta + ) { super( name, - fieldType.indexOptions() != IndexOptions.NONE, + isSearchable, false, - true, - new TextSearchInfo(fieldType, null, Lucene.KEYWORD_ANALYZER, Lucene.KEYWORD_ANALYZER), - Collections.emptyMap() + hasDocValues, + analyzer == null ? 
TextSearchInfo.SIMPLE_MATCH_ONLY : new TextSearchInfo(Defaults.FIELD_TYPE, null, analyzer, analyzer), + meta ); + setIndexAnalyzer(Lucene.KEYWORD_ANALYZER); this.ignoreAbove = Integer.MAX_VALUE; this.nullValue = null; - this.mappedFieldTypeName = null; - } - - public FlatObjectFieldType(String name, NamedAnalyzer analyzer) { - super(name, true, false, true, new TextSearchInfo(Defaults.FIELD_TYPE, null, analyzer, analyzer), Collections.emptyMap()); - this.ignoreAbove = Integer.MAX_VALUE; - this.nullValue = null; - this.mappedFieldTypeName = null; + this.mappedFieldTypeName = mappedFieldTypeName; + this.valueFieldType = getKeywordFieldType(name, VALUE_SUFFIX, isSearchable, hasDocValues); + this.valueAndPathFieldType = getKeywordFieldType(name, VALUE_AND_PATH_SUFFIX, isSearchable, hasDocValues); } - public FlatObjectFieldType(String name, String mappedFieldTypeName) { + public FlatObjectFieldType( + String name, + String mappedFieldTypeName, + KeywordFieldType valueFieldType, + KeywordFieldType valueAndPathFieldType + ) { super( name, - true, + valueFieldType.isSearchable(), false, - true, + valueFieldType.hasDocValues(), new TextSearchInfo(Defaults.FIELD_TYPE, null, Lucene.KEYWORD_ANALYZER, Lucene.KEYWORD_ANALYZER), Collections.emptyMap() ); this.ignoreAbove = Integer.MAX_VALUE; this.nullValue = null; this.mappedFieldTypeName = mappedFieldTypeName; - } - - void setValueFieldType(KeywordFieldMapper.KeywordFieldType valueFieldType) { this.valueFieldType = valueFieldType; + this.valueAndPathFieldType = valueAndPathFieldType; } - void setValueAndPathFieldType(KeywordFieldMapper.KeywordFieldType ValueAndPathFieldType) { - this.valueAndPathFieldType = ValueAndPathFieldType; + static KeywordFieldType getKeywordFieldType(String fullName, String valueType, boolean isSearchable, boolean hasDocValue) { + return new KeywordFieldType(fullName + valueType, isSearchable, hasDocValue, Collections.emptyMap()) { + @Override + protected String rewriteForDocValue(Object value) { + assert value instanceof String; + return fullName + DOT_SYMBOL + value; + } + }; } - public KeywordFieldMapper.KeywordFieldType getValueFieldType() { + public KeywordFieldType getValueFieldType() { return this.valueFieldType; } - public KeywordFieldMapper.KeywordFieldType getValueAndPathFieldType() { + public KeywordFieldType getValueAndPathFieldType() { return this.valueAndPathFieldType; } @@ -331,6 +332,10 @@ protected BytesRef indexedValueForSearch(Object value) { return getTextSearchInfo().getSearchAnalyzer().normalize(name(), value.toString()); } + private KeywordFieldType valueFieldType() { + return (mappedFieldTypeName == null) ? 
valueFieldType : valueAndPathFieldType; + } + /** * redirect queries with rewrite value to rewriteSearchValue and directSubFieldName */ @@ -352,17 +357,12 @@ public Query termQuery(Object value, @Nullable QueryShardContext context) { @Override public Query termsQuery(List values, QueryShardContext context) { - failIfNotIndexed(); - String directedSearchFieldName = directSubfield(); - BytesRef[] bytesRefs = new BytesRef[values.size()]; - for (int i = 0; i < bytesRefs.length; i++) { - String rewriteValues = rewriteValue(inputToString(values.get(i))); - - bytesRefs[i] = indexedValueForSearch(new BytesRef(rewriteValues)); - + List parsedValues = new ArrayList<>(values.size()); + for (Object value : values) { + parsedValues.add(rewriteValue(inputToString(value))); } - return new TermInSetQuery(directedSearchFieldName, bytesRefs); + return valueFieldType().termsQuery(parsedValues, context); } /** @@ -395,7 +395,7 @@ public String rewriteValue(String searchValueString) { } - private boolean hasMappedFieldTyeNameInQueryFieldName(String input) { + boolean hasMappedFieldTyeNameInQueryFieldName(String input) { String prefix = this.mappedFieldTypeName; if (prefix == null) { return false; @@ -413,6 +413,9 @@ private boolean hasMappedFieldTyeNameInQueryFieldName(String input) { } private String inputToString(Object inputValue) { + if (inputValue == null) { + return null; + } if (inputValue instanceof Integer) { String inputToString = Integer.toString((Integer) inputValue); return inputToString; @@ -448,46 +451,50 @@ private String inputToString(Object inputValue) { @Override public Query prefixQuery(String value, MultiTermQuery.RewriteMethod method, boolean caseInsensitive, QueryShardContext context) { - String directSubfield = directSubfield(); - String rewriteValue = rewriteValue(value); - - if (context.allowExpensiveQueries() == false) { - throw new OpenSearchException( - "[prefix] queries cannot be executed when '" - + ALLOW_EXPENSIVE_QUERIES.getKey() - + "' is set to false. For optimised prefix queries on text " - + "fields please enable [index_prefixes]." 
- ); - } - failIfNotIndexed(); - if (method == null) { - method = MultiTermQuery.CONSTANT_SCORE_REWRITE; - } - if (caseInsensitive) { - return AutomatonQueries.caseInsensitivePrefixQuery((new Term(directSubfield, indexedValueForSearch(rewriteValue))), method); - } - return new PrefixQuery(new Term(directSubfield, indexedValueForSearch(rewriteValue)), method); + return valueFieldType().prefixQuery(rewriteValue(value), method, caseInsensitive, context); + } + + @Override + public Query regexpQuery( + String value, + int syntaxFlags, + int matchFlags, + int maxDeterminizedStates, + @Nullable MultiTermQuery.RewriteMethod method, + QueryShardContext context + ) { + return valueFieldType().regexpQuery(rewriteValue(value), syntaxFlags, matchFlags, maxDeterminizedStates, method, context); + } + + @Override + public Query fuzzyQuery( + Object value, + Fuzziness fuzziness, + int prefixLength, + int maxExpansions, + boolean transpositions, + @Nullable MultiTermQuery.RewriteMethod method, + QueryShardContext context + ) { + return valueFieldType().fuzzyQuery( + rewriteValue(inputToString(value)), + fuzziness, + prefixLength, + maxExpansions, + transpositions, + method, + context + ); } @Override public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, QueryShardContext context) { - String directSubfield = directSubfield(); - String rewriteUpperTerm = rewriteValue(inputToString(upperTerm)); - String rewriteLowerTerm = rewriteValue(inputToString(lowerTerm)); - if (context.allowExpensiveQueries() == false) { - throw new OpenSearchException( - "[range] queries on [text] or [keyword] fields cannot be executed when '" - + ALLOW_EXPENSIVE_QUERIES.getKey() - + "' is set to false." - ); - } - failIfNotIndexed(); - return new TermRangeQuery( - directSubfield, - lowerTerm == null ? null : indexedValueForSearch(rewriteLowerTerm), - upperTerm == null ? 
null : indexedValueForSearch(rewriteUpperTerm), + return valueFieldType().rangeQuery( + rewriteValue(inputToString(lowerTerm)), + rewriteValue(inputToString(upperTerm)), includeLower, - includeUpper + includeUpper, + context ); } @@ -503,8 +510,12 @@ public Query existsQuery(QueryShardContext context) { searchKey = this.mappedFieldTypeName; searchField = name(); } else { - searchKey = FieldNamesFieldMapper.NAME; - searchField = name(); + if (hasDocValues()) { + return new FieldExistsQuery(name()); + } else { + searchKey = FieldNamesFieldMapper.NAME; + searchField = name(); + } } return new TermQuery(new Term(searchKey, indexedValueForSearch(searchField))); } @@ -516,13 +527,7 @@ public Query wildcardQuery( boolean caseInsensitve, QueryShardContext context ) { - // flat_object field types are always normalized, so ignore case sensitivity and force normalize the wildcard - // query text - throw new QueryShardException( - context, - "Can only use wildcard queries on keyword and text fields - not on [" + name() + "] which is of type [" + typeName() + "]" - ); - + return valueFieldType().wildcardQuery(rewriteValue(value), method, caseInsensitve, context); } } @@ -606,7 +611,6 @@ protected void parseCreateField(ParseContext context) throws IOException { } } } - } @Override @@ -637,6 +641,8 @@ public Iterator iterator() { */ private void parseValueAddFields(ParseContext context, String value, String fieldName) throws IOException { + assert valueFieldMapper != null; + assert valueAndPathFieldMapper != null; NamedAnalyzer normalizer = fieldType().normalizer(); if (normalizer != null) { value = normalizeValue(normalizer, name(), value); @@ -647,69 +653,57 @@ private void parseValueAddFields(ParseContext context, String value, String fiel if (fieldType.indexOptions() != IndexOptions.NONE || fieldType.stored()) { // convert to utf8 only once before feeding postings/dv/stored fields - final BytesRef binaryValue = new BytesRef(fieldType().name() + DOT_SYMBOL + value); - Field field = new FlatObjectField(fieldType().name(), binaryValue, fieldType); - if (fieldType().hasDocValues() == false && fieldType.omitNorms()) { + if (fieldType().hasDocValues() == false) { createFieldNamesField(context); } if (fieldName.equals(fieldType().name())) { + Field field = new FlatObjectField(fieldType().name(), binaryValue, fieldType); context.doc().add(field); - } - if (valueType.equals(VALUE_SUFFIX)) { - if (valueFieldMapper != null) { - valueFieldMapper.addField(context, value); - } - } - if (valueType.equals(VALUE_AND_PATH_SUFFIX)) { - if (valueAndPathFieldMapper != null) { - valueAndPathFieldMapper.addField(context, value); - } + } else if (valueType.equals(VALUE_SUFFIX)) { + valueFieldMapper.addField(context, value); + } else if (valueType.equals(VALUE_AND_PATH_SUFFIX)) { + valueAndPathFieldMapper.addField(context, value); } if (fieldType().hasDocValues()) { if (fieldName.equals(fieldType().name())) { context.doc().add(new SortedSetDocValuesField(fieldType().name(), binaryValue)); - } - if (valueType.equals(VALUE_SUFFIX)) { - if (valueFieldMapper != null) { - context.doc().add(new SortedSetDocValuesField(fieldType().name() + VALUE_SUFFIX, binaryValue)); - } - } - if (valueType.equals(VALUE_AND_PATH_SUFFIX)) { - if (valueAndPathFieldMapper != null) { - context.doc().add(new SortedSetDocValuesField(fieldType().name() + VALUE_AND_PATH_SUFFIX, binaryValue)); - } + } else if (valueType.equals(VALUE_SUFFIX)) { + context.doc().add(new SortedSetDocValuesField(fieldType().name() + VALUE_SUFFIX, binaryValue)); + } else if 
(valueType.equals(VALUE_AND_PATH_SUFFIX)) { + context.doc().add(new SortedSetDocValuesField(fieldType().name() + VALUE_AND_PATH_SUFFIX, binaryValue)); } } - } - } private static String normalizeValue(NamedAnalyzer normalizer, String field, String value) throws IOException { - String normalizerErrorMessage = "The normalization token stream is " - + "expected to produce exactly 1 token, but got 0 for analyzer " - + normalizer - + " and input \"" - + value - + "\""; try (TokenStream ts = normalizer.tokenStream(field, value)) { final CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class); ts.reset(); if (ts.incrementToken() == false) { - throw new IllegalStateException(normalizerErrorMessage); + throw new IllegalStateException(errorMessage(normalizer, value)); } final String newValue = termAtt.toString(); if (ts.incrementToken()) { - throw new IllegalStateException(normalizerErrorMessage); + throw new IllegalStateException(errorMessage(normalizer, value)); } ts.end(); return newValue; } } + private static String errorMessage(NamedAnalyzer normalizer, String value) { + return "The normalization token stream is " + + "expected to produce exactly 1 token, but got 0 for analyzer " + + normalizer + + " and input \"" + + value + + "\""; + } + @Override protected String contentType() { return CONTENT_TYPE; @@ -717,7 +711,7 @@ protected String contentType() { private static final class ValueAndPathFieldMapper extends FieldMapper { - protected ValueAndPathFieldMapper(FieldType fieldType, KeywordFieldMapper.KeywordFieldType mappedFieldType) { + protected ValueAndPathFieldMapper(FieldType fieldType, KeywordFieldType mappedFieldType) { super(mappedFieldType.name(), fieldType, mappedFieldType, MultiFields.empty(), CopyTo.empty()); } @@ -728,7 +722,7 @@ void addField(ParseContext context, String value) { context.doc().add(field); - if (fieldType().hasDocValues() == false && fieldType.omitNorms()) { + if (fieldType().hasDocValues() == false) { createFieldNamesField(context); } } @@ -758,7 +752,7 @@ public String toString() { private static final class ValueFieldMapper extends FieldMapper { - protected ValueFieldMapper(FieldType fieldType, KeywordFieldMapper.KeywordFieldType mappedFieldType) { + protected ValueFieldMapper(FieldType fieldType, KeywordFieldType mappedFieldType) { super(mappedFieldType.name(), fieldType, mappedFieldType, MultiFields.empty(), CopyTo.empty()); } @@ -768,7 +762,7 @@ void addField(ParseContext context, String value) { Field field = new KeywordFieldMapper.KeywordField(fieldType().name(), binaryValue, fieldType); context.doc().add(field); - if (fieldType().hasDocValues() == false && fieldType.omitNorms()) { + if (fieldType().hasDocValues() == false) { createFieldNamesField(context); } } diff --git a/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java index 11ff601b3fd6d..54a1aead5fcc7 100644 --- a/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java @@ -263,7 +263,7 @@ public KeywordFieldMapper build(BuilderContext context) { * * @opensearch.internal */ - public static final class KeywordFieldType extends StringFieldType { + public static class KeywordFieldType extends StringFieldType { private final int ignoreAbove; private final String nullValue; @@ -387,6 +387,10 @@ protected BytesRef indexedValueForSearch(Object value) { return 
getTextSearchInfo().getSearchAnalyzer().normalize(name(), value.toString()); } + protected Object rewriteForDocValue(Object value) { + return value; + } + @Override public Query termsQuery(List values, QueryShardContext context) { failIfNotIndexedAndNoDocValues(); @@ -395,19 +399,21 @@ public Query termsQuery(List values, QueryShardContext context) { if (!context.keywordFieldIndexOrDocValuesEnabled()) { return super.termsQuery(values, context); } - BytesRef[] bytesRefs = new BytesRef[values.size()]; - for (int i = 0; i < bytesRefs.length; i++) { - bytesRefs[i] = indexedValueForSearch(values.get(i)); + BytesRef[] iBytesRefs = new BytesRef[values.size()]; + BytesRef[] dVByteRefs = new BytesRef[values.size()]; + for (int i = 0; i < iBytesRefs.length; i++) { + iBytesRefs[i] = indexedValueForSearch(values.get(i)); + dVByteRefs[i] = indexedValueForSearch(rewriteForDocValue(values.get(i))); } - Query indexQuery = new TermInSetQuery(name(), bytesRefs); - Query dvQuery = new TermInSetQuery(MultiTermQuery.DOC_VALUES_REWRITE, name(), bytesRefs); + Query indexQuery = new TermInSetQuery(name(), iBytesRefs); + Query dvQuery = new TermInSetQuery(MultiTermQuery.DOC_VALUES_REWRITE, name(), dVByteRefs); return new IndexOrDocValuesQuery(indexQuery, dvQuery); } // if we only have doc_values enabled, we construct a new query with doc_values re-written if (hasDocValues()) { BytesRef[] bytesRefs = new BytesRef[values.size()]; for (int i = 0; i < bytesRefs.length; i++) { - bytesRefs[i] = indexedValueForSearch(values.get(i)); + bytesRefs[i] = indexedValueForSearch(rewriteForDocValue(values.get(i))); } return new TermInSetQuery(MultiTermQuery.DOC_VALUES_REWRITE, name(), bytesRefs); } @@ -436,17 +442,25 @@ public Query prefixQuery( return super.prefixQuery(value, method, caseInsensitive, context); } Query indexQuery = super.prefixQuery(value, method, caseInsensitive, context); - Query dvQuery = super.prefixQuery(value, MultiTermQuery.DOC_VALUES_REWRITE, caseInsensitive, context); + Query dvQuery = super.prefixQuery( + (String) rewriteForDocValue(value), + MultiTermQuery.DOC_VALUES_REWRITE, + caseInsensitive, + context + ); return new IndexOrDocValuesQuery(indexQuery, dvQuery); } if (hasDocValues()) { if (caseInsensitive) { return AutomatonQueries.caseInsensitivePrefixQuery( - (new Term(name(), indexedValueForSearch(value))), + (new Term(name(), indexedValueForSearch(rewriteForDocValue(value)))), MultiTermQuery.DOC_VALUES_REWRITE ); } - return new PrefixQuery(new Term(name(), indexedValueForSearch(value)), MultiTermQuery.DOC_VALUES_REWRITE); + return new PrefixQuery( + new Term(name(), indexedValueForSearch(rewriteForDocValue(value))), + MultiTermQuery.DOC_VALUES_REWRITE + ); } return super.prefixQuery(value, method, caseInsensitive, context); } @@ -472,7 +486,7 @@ public Query regexpQuery( } Query indexQuery = super.regexpQuery(value, syntaxFlags, matchFlags, maxDeterminizedStates, method, context); Query dvQuery = super.regexpQuery( - value, + (String) rewriteForDocValue(value), syntaxFlags, matchFlags, maxDeterminizedStates, @@ -483,7 +497,7 @@ public Query regexpQuery( } if (hasDocValues()) { return new RegexpQuery( - new Term(name(), indexedValueForSearch(value)), + new Term(name(), indexedValueForSearch(rewriteForDocValue(value))), syntaxFlags, matchFlags, RegexpQuery.DEFAULT_PROVIDER, @@ -514,8 +528,8 @@ public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower ); Query dvQuery = new TermRangeQuery( name(), - lowerTerm == null ? 
null : indexedValueForSearch(lowerTerm), - upperTerm == null ? null : indexedValueForSearch(upperTerm), + lowerTerm == null ? null : indexedValueForSearch(rewriteForDocValue(lowerTerm)), + upperTerm == null ? null : indexedValueForSearch(rewriteForDocValue(upperTerm)), includeLower, includeUpper, MultiTermQuery.DOC_VALUES_REWRITE @@ -525,8 +539,8 @@ public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower if (hasDocValues()) { return new TermRangeQuery( name(), - lowerTerm == null ? null : indexedValueForSearch(lowerTerm), - upperTerm == null ? null : indexedValueForSearch(upperTerm), + lowerTerm == null ? null : indexedValueForSearch(rewriteForDocValue(lowerTerm)), + upperTerm == null ? null : indexedValueForSearch(rewriteForDocValue(upperTerm)), includeLower, includeUpper, MultiTermQuery.DOC_VALUES_REWRITE @@ -563,7 +577,7 @@ public Query fuzzyQuery( } Query indexQuery = super.fuzzyQuery(value, fuzziness, prefixLength, maxExpansions, transpositions, method, context); Query dvQuery = super.fuzzyQuery( - value, + rewriteForDocValue(value), fuzziness, prefixLength, maxExpansions, @@ -575,8 +589,8 @@ public Query fuzzyQuery( } if (hasDocValues()) { return new FuzzyQuery( - new Term(name(), indexedValueForSearch(value)), - fuzziness.asDistance(BytesRefs.toString(value)), + new Term(name(), indexedValueForSearch(rewriteForDocValue(value))), + fuzziness.asDistance(BytesRefs.toString(rewriteForDocValue(value))), prefixLength, maxExpansions, transpositions, @@ -607,13 +621,19 @@ public Query wildcardQuery( return super.wildcardQuery(value, method, caseInsensitive, true, context); } Query indexQuery = super.wildcardQuery(value, method, caseInsensitive, true, context); - Query dvQuery = super.wildcardQuery(value, MultiTermQuery.DOC_VALUES_REWRITE, caseInsensitive, true, context); + Query dvQuery = super.wildcardQuery( + (String) rewriteForDocValue(value), + MultiTermQuery.DOC_VALUES_REWRITE, + caseInsensitive, + true, + context + ); return new IndexOrDocValuesQuery(indexQuery, dvQuery); } if (hasDocValues()) { Term term; value = normalizeWildcardPattern(name(), value, getTextSearchInfo().getSearchAnalyzer()); - term = new Term(name(), value); + term = new Term(name(), (String) rewriteForDocValue(value)); if (caseInsensitive) { return AutomatonQueries.caseInsensitiveWildcardQuery(term, method); } diff --git a/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldMapperTests.java index 94d1f501bee51..afd9e994ce3ae 100644 --- a/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldMapperTests.java @@ -12,6 +12,7 @@ import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.IndexableFieldType; +import org.apache.lucene.search.FieldExistsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; @@ -23,14 +24,13 @@ import java.io.IOException; +import static org.opensearch.common.xcontent.JsonToStringXContentParser.VALUE_AND_PATH_SUFFIX; +import static org.opensearch.common.xcontent.JsonToStringXContentParser.VALUE_SUFFIX; +import static org.opensearch.index.mapper.FlatObjectFieldMapper.CONTENT_TYPE; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.core.IsEqual.equalTo; public class FlatObjectFieldMapperTests extends MapperTestCase { - 
private static final String FIELD_TYPE = "flat_object"; - private static final String VALUE_AND_PATH_SUFFIX = "._valueAndPath"; - private static final String VALUE_SUFFIX = "._value"; - protected boolean supportsMeta() { return false; } @@ -41,7 +41,7 @@ protected boolean supportsOrIgnoresBoost() { public void testMapperServiceHasParser() throws IOException { MapperService mapperService = createMapperService(fieldMapping(b -> { minimalMapping(b); })); - Mapper.TypeParser parser = mapperService.mapperRegistry.getMapperParsers().get(FIELD_TYPE); + Mapper.TypeParser parser = mapperService.mapperRegistry.getMapperParsers().get(CONTENT_TYPE); assertNotNull(parser); assertTrue(parser instanceof FlatObjectFieldMapper.TypeParser); } @@ -49,28 +49,39 @@ public void testMapperServiceHasParser() throws IOException { protected void assertExistsQuery(MapperService mapperService) throws IOException { ParseContext.Document fields = mapperService.documentMapper().parse(source(this::writeField)).rootDoc(); QueryShardContext queryShardContext = createQueryShardContext(mapperService); - MappedFieldType fieldType = mapperService.fieldType("field"); + FlatObjectFieldMapper.FlatObjectFieldType fieldType = (FlatObjectFieldMapper.FlatObjectFieldType) mapperService.fieldType("field"); Query query = fieldType.existsQuery(queryShardContext); assertExistsQuery(fieldType, query, fields); - } - protected void assertExistsQuery(MappedFieldType fieldType, Query query, ParseContext.Document fields) { - // we always perform a term query against _field_names, even when the field - // is not added to _field_names because it is not indexed nor stored - assertThat(query, instanceOf(TermQuery.class)); - TermQuery termQuery = (TermQuery) query; - assertEquals(FieldNamesFieldMapper.NAME, termQuery.getTerm().field()); - assertEquals("field", termQuery.getTerm().text()); - if (fieldType.isSearchable() || fieldType.isStored()) { - assertNotNull(fields.getField(FieldNamesFieldMapper.NAME)); + protected void assertExistsQuery(FlatObjectFieldMapper.FlatObjectFieldType fieldType, Query query, ParseContext.Document fields) { + + if (fieldType.hasDocValues() && fieldType.hasMappedFieldTyeNameInQueryFieldName(fieldType.name()) == false) { + assertThat(query, instanceOf(FieldExistsQuery.class)); + FieldExistsQuery fieldExistsQuery = (FieldExistsQuery) query; + assertEquals(fieldType.name(), fieldExistsQuery.getField()); } else { + assertThat(query, instanceOf(TermQuery.class)); + TermQuery termQuery = (TermQuery) query; + assertEquals(FieldNamesFieldMapper.NAME, termQuery.getTerm().field()); + assertEquals("field", termQuery.getTerm().text()); + } + + if (fieldType.hasDocValues()) { + assertDocValuesField(fields, "field"); assertNoFieldNamesField(fields); + } else { + assertNoDocValuesField(fields, "field"); + if (fieldType.isSearchable()) { + assertNotNull(fields.getField(FieldNamesFieldMapper.NAME)); + } else { + assertNoFieldNamesField(fields); + } } } public void minimalMapping(XContentBuilder b) throws IOException { - b.field("type", FIELD_TYPE); + b.field("type", CONTENT_TYPE); } /** diff --git a/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldTypeTests.java index 9ec053dc59d10..38a6f13777f00 100644 --- a/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldTypeTests.java @@ -8,11 +8,23 @@ package org.opensearch.index.mapper; +import 
org.apache.lucene.document.FieldType; +import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.Term; +import org.apache.lucene.search.FieldExistsQuery; +import org.apache.lucene.search.FuzzyQuery; +import org.apache.lucene.search.IndexOrDocValuesQuery; +import org.apache.lucene.search.MultiTermQuery; +import org.apache.lucene.search.PrefixQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.RegexpQuery; +import org.apache.lucene.search.TermInSetQuery; import org.apache.lucene.search.TermQuery; -import org.opensearch.Version; -import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.common.settings.Settings; +import org.apache.lucene.search.TermRangeQuery; +import org.apache.lucene.search.WildcardQuery; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.automaton.Operations; +import org.opensearch.common.unit.Fuzziness; import org.opensearch.index.analysis.AnalyzerScope; import org.opensearch.index.analysis.NamedAnalyzer; @@ -21,18 +33,41 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; +import static org.opensearch.common.xcontent.JsonToStringXContentParser.VALUE_AND_PATH_SUFFIX; +import static org.opensearch.common.xcontent.JsonToStringXContentParser.VALUE_SUFFIX; +import static org.apache.lucene.search.MultiTermQuery.CONSTANT_SCORE_REWRITE; +import static org.apache.lucene.search.MultiTermQuery.DOC_VALUES_REWRITE; + public class FlatObjectFieldTypeTests extends FieldTypeTestCase { - private static MappedFieldType getFlatParentFieldType(String fieldName) { - Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT.id).build(); - Mapper.BuilderContext context = new Mapper.BuilderContext(settings, new ContentPath()); - MappedFieldType flatParentFieldType = new FlatObjectFieldMapper.Builder(fieldName).build(context).fieldType(); - return flatParentFieldType; + + private static MappedFieldType getFlatParentFieldType( + String fieldName, + String mappedFieldTypeName, + boolean isSearchable, + boolean hasDocValues + ) { + FlatObjectFieldMapper.Builder builder = new FlatObjectFieldMapper.Builder(fieldName); + FlatObjectFieldMapper.FlatObjectFieldType flatObjectFieldType = new FlatObjectFieldMapper.FlatObjectFieldType( + fieldName, + mappedFieldTypeName, + isSearchable, + hasDocValues, + null, + Collections.emptyMap() + ); + FieldType fieldtype = new FieldType(FlatObjectFieldMapper.Defaults.FIELD_TYPE); + FieldType vft = new FieldType(fieldtype); + if (flatObjectFieldType.isSearchable() == false) { + vft.setIndexOptions(IndexOptions.NONE); + } + return flatObjectFieldType; } public void testFetchSourceValue() throws IOException { - MappedFieldType mapper = getFlatParentFieldType("field"); + MappedFieldType mapper = getFlatParentFieldType("field", null, true, true); Map jsonPoint = new HashMap<>(); jsonPoint.put("type", "flat_object"); @@ -54,32 +89,48 @@ public void testFetchSourceValue() throws IOException { public void testDirectSubfield() { { - MappedFieldType flatParentFieldType = getFlatParentFieldType("field"); + FlatObjectFieldMapper.FlatObjectFieldType flatParentFieldType = + (FlatObjectFieldMapper.FlatObjectFieldType) (getFlatParentFieldType("field", null, true, true)); // when searching for "foo" in "field", the directSubfield is field._value field - String searchFieldName = ((FlatObjectFieldMapper.FlatObjectFieldType) flatParentFieldType).directSubfield(); + String 
searchFieldName = (flatParentFieldType).directSubfield(); assertEquals("field._value", searchFieldName); - MappedFieldType dynamicMappedFieldType = new FlatObjectFieldMapper.FlatObjectFieldType("bar", flatParentFieldType.name()); + MappedFieldType dynamicMappedFieldType = new FlatObjectFieldMapper.FlatObjectFieldType( + "bar", + flatParentFieldType.name(), + flatParentFieldType.getValueFieldType(), + flatParentFieldType.getValueAndPathFieldType() + ); // when searching for "foo" in "field.bar", the directSubfield is field._valueAndPath field String searchFieldNameDocPath = ((FlatObjectFieldMapper.FlatObjectFieldType) dynamicMappedFieldType).directSubfield(); assertEquals("field._valueAndPath", searchFieldNameDocPath); } { NamedAnalyzer analyzer = new NamedAnalyzer("default", AnalyzerScope.INDEX, null); - MappedFieldType ft = new FlatObjectFieldMapper.FlatObjectFieldType("field", analyzer); + MappedFieldType ft = new FlatObjectFieldMapper.FlatObjectFieldType("field", null, true, true, analyzer, Collections.emptyMap()); assertEquals("field._value", ((FlatObjectFieldMapper.FlatObjectFieldType) ft).directSubfield()); } } public void testRewriteValue() { - MappedFieldType flatParentFieldType = getFlatParentFieldType("field"); + FlatObjectFieldMapper.FlatObjectFieldType flatParentFieldType = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + null, + true, + true + ); // when searching for "foo" in "field", the rewrite value is "foo" - String searchValues = ((FlatObjectFieldMapper.FlatObjectFieldType) flatParentFieldType).rewriteValue("foo"); + String searchValues = (flatParentFieldType).rewriteValue("foo"); assertEquals("foo", searchValues); - MappedFieldType dynamicMappedFieldType = new FlatObjectFieldMapper.FlatObjectFieldType("field.bar", flatParentFieldType.name()); + MappedFieldType dynamicMappedFieldType = new FlatObjectFieldMapper.FlatObjectFieldType( + "field.bar", + flatParentFieldType.name(), + flatParentFieldType.getValueFieldType(), + flatParentFieldType.getValueAndPathFieldType() + ); // when searching for "foo" in "field.bar", the rewrite value is "field.bar=foo" String searchFieldNameDocPath = ((FlatObjectFieldMapper.FlatObjectFieldType) dynamicMappedFieldType).directSubfield(); @@ -89,15 +140,25 @@ public void testRewriteValue() { public void testTermQuery() { - MappedFieldType flatParentFieldType = getFlatParentFieldType("field"); + FlatObjectFieldMapper.FlatObjectFieldType flatParentFieldType = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + null, + true, + true + ); // when searching for "foo" in "field", the term query is directed to search "foo" in field._value field - String searchFieldName = ((FlatObjectFieldMapper.FlatObjectFieldType) flatParentFieldType).directSubfield(); - String searchValues = ((FlatObjectFieldMapper.FlatObjectFieldType) flatParentFieldType).rewriteValue("foo"); + String searchFieldName = (flatParentFieldType).directSubfield(); + String searchValues = (flatParentFieldType).rewriteValue("foo"); assertEquals("foo", searchValues); assertEquals(new TermQuery(new Term(searchFieldName, searchValues)), flatParentFieldType.termQuery(searchValues, null)); - MappedFieldType dynamicMappedFieldType = new FlatObjectFieldMapper.FlatObjectFieldType("field.bar", flatParentFieldType.name()); + MappedFieldType dynamicMappedFieldType = new FlatObjectFieldMapper.FlatObjectFieldType( + "field.bar", + flatParentFieldType.name(), + flatParentFieldType.getValueFieldType(), + 
flatParentFieldType.getValueAndPathFieldType() + ); // when searching for "foo" in "field.bar", the term query is directed to search in field._valueAndPath field String searchFieldNameDocPath = ((FlatObjectFieldMapper.FlatObjectFieldType) dynamicMappedFieldType).directSubfield(); @@ -105,30 +166,919 @@ public void testTermQuery() { assertEquals("field.bar=foo", searchValuesDocPath); assertEquals(new TermQuery(new Term(searchFieldNameDocPath, searchValuesDocPath)), dynamicMappedFieldType.termQuery("foo", null)); - MappedFieldType unsearchable = new FlatObjectFieldMapper.FlatObjectFieldType("field", false, true, Collections.emptyMap()); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> unsearchable.termQuery("bar", null)); + MappedFieldType unsearchable = new FlatObjectFieldMapper.FlatObjectFieldType( + "field", + null, + false, + true, + null, + Collections.emptyMap() + ); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> unsearchable.termQuery("bar", MOCK_QSC_ENABLE_INDEX_DOC_VALUES) + ); assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage()); } public void testExistsQuery() { { - MappedFieldType ft = getFlatParentFieldType("field"); + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + null, + true, + true + ); // when checking on the flat_object field name "field", check if exist in the field mapper names - assertEquals(new TermQuery(new Term(FieldNamesFieldMapper.NAME, "field")), ft.existsQuery(null)); + assertEquals(new FieldExistsQuery("field"), ft.existsQuery(null)); // when checking if a subfield within the flat_object, for example, "field.bar", use term query in the flat_object field - MappedFieldType dynamicMappedFieldType = new FlatObjectFieldMapper.FlatObjectFieldType("field.bar", ft.name()); + MappedFieldType dynamicMappedFieldType = new FlatObjectFieldMapper.FlatObjectFieldType( + "field.bar", + ft.name(), + ft.getValueFieldType(), + ft.getValueAndPathFieldType() + ); assertEquals(new TermQuery(new Term("field", "field.bar")), dynamicMappedFieldType.existsQuery(null)); } + { FlatObjectFieldMapper.FlatObjectFieldType ft = new FlatObjectFieldMapper.FlatObjectFieldType( "field", + null, true, false, + null, Collections.emptyMap() ); - assertEquals(new TermQuery(new Term(FieldNamesFieldMapper.NAME, "field")), ft.existsQuery(null)); + assertEquals(new TermQuery(new Term(FieldNamesFieldMapper.NAME, "field")), ft.existsQuery(MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } + } + + public void testTermsQuery() { + + // 1.test isSearchable=true, hasDocValues=true, mappedFieldTypeName=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + null, + true, + true + ); + List indexTerms = new ArrayList<>(); + indexTerms.add(new BytesRef("foo")); + indexTerms.add(new BytesRef("bar")); + List docValueterms = new ArrayList<>(); + docValueterms.add(new BytesRef("field.foo")); + docValueterms.add(new BytesRef("field.bar")); + Query expected = new IndexOrDocValuesQuery( + new TermInSetQuery("field" + VALUE_SUFFIX, indexTerms), + new TermInSetQuery(DOC_VALUES_REWRITE, "field" + VALUE_SUFFIX, docValueterms) + ); + + assertEquals(expected, ft.termsQuery(Arrays.asList("foo", "bar"), MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } + + // test isSearchable=true, hasDocValues=true, mappedFieldTypeName!=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = 
(FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + "field", + true, + true + ); + + List indexTerms = new ArrayList<>(); + indexTerms.add(new BytesRef("foo")); + indexTerms.add(new BytesRef("bar")); + List docValueterms = new ArrayList<>(); + docValueterms.add(new BytesRef("field.foo")); + docValueterms.add(new BytesRef("field.bar")); + Query expected = new IndexOrDocValuesQuery( + new TermInSetQuery("field" + VALUE_AND_PATH_SUFFIX, indexTerms), + new TermInSetQuery(DOC_VALUES_REWRITE, "field" + VALUE_AND_PATH_SUFFIX, docValueterms) + ); + + assertEquals(expected, ft.termsQuery(Arrays.asList("foo", "bar"), MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } + + // 2.test isSearchable=true, hasDocValues=false, mappedFieldTypeName=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + null, + true, + false + ); + List indexTerms = new ArrayList<>(); + indexTerms.add(new BytesRef("foo")); + indexTerms.add(new BytesRef("bar")); + Query expected = new TermInSetQuery("field" + VALUE_SUFFIX, indexTerms); + + assertEquals(expected, ft.termsQuery(Arrays.asList("foo", "bar"), MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } + + // test isSearchable=true, hasDocValues=false, mappedFieldTypeName!=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + "field", + true, + false + ); + List indexTerms = new ArrayList<>(); + indexTerms.add(new BytesRef("foo")); + indexTerms.add(new BytesRef("bar")); + Query expected = new TermInSetQuery("field" + VALUE_AND_PATH_SUFFIX, indexTerms); + + assertEquals(expected, ft.termsQuery(Arrays.asList("foo", "bar"), null)); + } + + // 3.test isSearchable=false, hasDocValues=true, mappedFieldTypeName=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + null, + false, + true + ); + + List indexTerms = new ArrayList<>(); + indexTerms.add(new BytesRef("foo")); + indexTerms.add(new BytesRef("bar")); + List docValueterms = new ArrayList<>(); + docValueterms.add(new BytesRef("field.foo")); + docValueterms.add(new BytesRef("field.bar")); + Query expected = new TermInSetQuery(DOC_VALUES_REWRITE, "field" + VALUE_SUFFIX, docValueterms); + + assertEquals(expected, ft.termsQuery(Arrays.asList("foo", "bar"), MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } + + // test isSearchable=false, hasDocValues=true, mappedFieldTypeName!=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + "field", + false, + true + ); + + List indexTerms = new ArrayList<>(); + indexTerms.add(new BytesRef("foo")); + indexTerms.add(new BytesRef("bar")); + List docValueterms = new ArrayList<>(); + docValueterms.add(new BytesRef("field.foo")); + docValueterms.add(new BytesRef("field.bar")); + Query expected = new TermInSetQuery(DOC_VALUES_REWRITE, "field" + VALUE_AND_PATH_SUFFIX, docValueterms); + + assertEquals(expected, ft.termsQuery(Arrays.asList("foo", "bar"), MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } + + // 4.test isSearchable=false, hasDocValues=false, mappedFieldTypeName=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + null, + false, + false + ); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> ft.termsQuery(Arrays.asList("foo", "bar"), 
MOCK_QSC_ENABLE_INDEX_DOC_VALUES) + ); + assertEquals( + "Cannot search on field [field._value] since it is both not indexed, and does not have doc_values " + "enabled.", + e.getMessage() + ); + } + + // test isSearchable=false, hasDocValues=false, mappedFieldTypeName!=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + "field", + false, + false + ); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> ft.termsQuery(Arrays.asList("foo", "bar"), MOCK_QSC_ENABLE_INDEX_DOC_VALUES) + ); + assertEquals( + "Cannot search on field [field._valueAndPath] since it is both not indexed, and does not have doc_values " + "enabled.", + e.getMessage() + ); + } + } + + public void testPrefixQuery() { + // 1.test isSearchable=true, hasDocValues=true, mappedFieldTypeName=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + null, + true, + true + ); + Query expected = new IndexOrDocValuesQuery( + new PrefixQuery(new Term("field" + VALUE_SUFFIX, "foo"), CONSTANT_SCORE_REWRITE), + new PrefixQuery(new Term("field" + VALUE_SUFFIX, "field.foo"), DOC_VALUES_REWRITE) + ); + assertEquals(expected, ft.prefixQuery("foo", CONSTANT_SCORE_REWRITE, false, MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } + + // test isSearchable=true, hasDocValues=true, mappedFieldTypeName!=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + "field", + true, + true + ); + Query expected = new IndexOrDocValuesQuery( + new PrefixQuery(new Term("field" + VALUE_AND_PATH_SUFFIX, "foo"), CONSTANT_SCORE_REWRITE), + new PrefixQuery(new Term("field" + VALUE_AND_PATH_SUFFIX, "field.foo"), DOC_VALUES_REWRITE) + ); + assertEquals(expected, ft.prefixQuery("foo", CONSTANT_SCORE_REWRITE, false, MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } + + // 2.test isSearchable=true, hasDocValues=false, mappedFieldTypeName=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + null, + true, + false + ); + Query expected = new PrefixQuery(new Term("field" + VALUE_SUFFIX, "foo"), CONSTANT_SCORE_REWRITE); + assertEquals(expected, ft.prefixQuery("foo", CONSTANT_SCORE_REWRITE, false, MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } + + // test isSearchable=true, hasDocValues=false, mappedFieldTypeName!=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + "field", + true, + false + ); + Query expected = new PrefixQuery(new Term("field" + VALUE_AND_PATH_SUFFIX, "foo"), CONSTANT_SCORE_REWRITE); + assertEquals(expected, ft.prefixQuery("foo", CONSTANT_SCORE_REWRITE, false, MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } + + // 3.test isSearchable=false, hasDocValues=true, mappedFieldTypeName=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + null, + false, + true + ); + Query expected = new PrefixQuery(new Term("field" + VALUE_SUFFIX, "field.foo"), DOC_VALUES_REWRITE); + assertEquals(expected, ft.prefixQuery("foo", CONSTANT_SCORE_REWRITE, false, MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } + + // test isSearchable=false, hasDocValues=true, mappedFieldTypeName!=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) 
getFlatParentFieldType( + "field", + "field", + false, + true + ); + Query expected = new PrefixQuery(new Term("field" + VALUE_AND_PATH_SUFFIX, "field.foo"), DOC_VALUES_REWRITE); + assertEquals(expected, ft.prefixQuery("foo", CONSTANT_SCORE_REWRITE, false, MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } + + // 4.test isSearchable=false, hasDocValues=false, mappedFieldTypeName=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + null, + false, + false + ); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> ft.prefixQuery("foo", CONSTANT_SCORE_REWRITE, false, MOCK_QSC_ENABLE_INDEX_DOC_VALUES) + ); + assertEquals( + "Cannot search on field [field._value] since it is both not indexed, and does not have doc_values " + "enabled.", + e.getMessage() + ); + } + + // test isSearchable=false, hasDocValues=false, mappedFieldTypeName!=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + "field", + false, + false + ); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> ft.prefixQuery("foo", CONSTANT_SCORE_REWRITE, false, MOCK_QSC_ENABLE_INDEX_DOC_VALUES) + ); + assertEquals( + "Cannot search on field [field._valueAndPath] since it is both not indexed, and does not have doc_values " + "enabled.", + e.getMessage() + ); + } + } + + public void testRegexpQuery() { + // 1.test isSearchable=true, hasDocValues=true, mappedFieldTypeName=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + null, + true, + true + ); + Query expected = new IndexOrDocValuesQuery( + new RegexpQuery( + new Term("field" + VALUE_SUFFIX, new BytesRef("foo")), + 0, + 0, + RegexpQuery.DEFAULT_PROVIDER, + 10, + CONSTANT_SCORE_REWRITE + ), + new RegexpQuery( + new Term("field" + VALUE_SUFFIX, new BytesRef("field.foo")), + 0, + 0, + RegexpQuery.DEFAULT_PROVIDER, + 10, + DOC_VALUES_REWRITE + ) + ); + assertEquals(expected, ft.regexpQuery("foo", 0, 0, 10, null, MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } + + // test isSearchable=true, hasDocValues=true, mappedFieldTypeName!=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + "field", + true, + true + ); + Query expected = new IndexOrDocValuesQuery( + new RegexpQuery( + new Term("field" + VALUE_AND_PATH_SUFFIX, new BytesRef("foo")), + 0, + 0, + RegexpQuery.DEFAULT_PROVIDER, + 10, + CONSTANT_SCORE_REWRITE + ), + new RegexpQuery( + new Term("field" + VALUE_AND_PATH_SUFFIX, new BytesRef("field.foo")), + 0, + 0, + RegexpQuery.DEFAULT_PROVIDER, + 10, + DOC_VALUES_REWRITE + ) + ); + assertEquals(expected, ft.regexpQuery("foo", 0, 0, 10, null, MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } + + // 2.test isSearchable=true, hasDocValues=false, mappedFieldTypeName=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + null, + true, + false + ); + Query expected = new RegexpQuery( + new Term("field" + VALUE_SUFFIX, new BytesRef("foo")), + 0, + 0, + RegexpQuery.DEFAULT_PROVIDER, + 10, + CONSTANT_SCORE_REWRITE + ); + assertEquals(expected, ft.regexpQuery("foo", 0, 0, 10, null, MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } + + // test isSearchable=true, hasDocValues=false, mappedFieldTypeName!=null + { + 
FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + "field", + true, + false + ); + Query expected = new RegexpQuery( + new Term("field" + VALUE_AND_PATH_SUFFIX, new BytesRef("foo")), + 0, + 0, + RegexpQuery.DEFAULT_PROVIDER, + 10, + CONSTANT_SCORE_REWRITE + ); + assertEquals(expected, ft.regexpQuery("foo", 0, 0, 10, null, MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } + + // 3.test isSearchable=false, hasDocValues=true, mappedFieldTypeName=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + null, + false, + true + ); + Query expected = new RegexpQuery( + new Term("field" + VALUE_SUFFIX, new BytesRef("field.foo")), + 0, + 0, + RegexpQuery.DEFAULT_PROVIDER, + 10, + DOC_VALUES_REWRITE + ); + assertEquals(expected, ft.regexpQuery("foo", 0, 0, 10, null, MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } + + // test isSearchable=false, hasDocValues=true, mappedFieldTypeName!=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + "field", + false, + true + ); + Query expected = new RegexpQuery( + new Term("field" + VALUE_AND_PATH_SUFFIX, new BytesRef("field.foo")), + 0, + 0, + RegexpQuery.DEFAULT_PROVIDER, + 10, + DOC_VALUES_REWRITE + ); + assertEquals(expected, ft.regexpQuery("foo", 0, 0, 10, null, MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } + + // 4.test isSearchable=false, hasDocValues=false, mappedFieldTypeName=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + null, + false, + false + ); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> ft.regexpQuery("foo", 0, 0, 10, null, MOCK_QSC_ENABLE_INDEX_DOC_VALUES) + ); + assertEquals( + "Cannot search on field [field._value] since it is both not indexed, and does not have doc_values " + "enabled.", + e.getMessage() + ); + } + + // test isSearchable=false, hasDocValues=false, mappedFieldTypeName!=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + "field", + false, + false + ); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> ft.regexpQuery("foo", 0, 0, 10, null, MOCK_QSC_ENABLE_INDEX_DOC_VALUES) + ); + assertEquals( + "Cannot search on field [field._valueAndPath] since it is both not indexed, and does not have doc_values " + "enabled.", + e.getMessage() + ); + } + } + + public void testFuzzyQuery() { + // 1.test isSearchable=true, hasDocValues=true, mappedFieldTypeName=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + null, + true, + true + ); + Query expected = new IndexOrDocValuesQuery( + new FuzzyQuery(new Term("field" + VALUE_SUFFIX, "foo"), 2, 1, 50, true), + new FuzzyQuery(new Term("field" + VALUE_SUFFIX, "field.foo"), 2, 1, 50, true, MultiTermQuery.DOC_VALUES_REWRITE) + ); + assertEquals(expected, ft.fuzzyQuery("foo", Fuzziness.fromEdits(2), 1, 50, true, null, MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } + + // test isSearchable=true, hasDocValues=true, mappedFieldTypeName!=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + "field", + true, + true + ); + Query expected = new IndexOrDocValuesQuery( + new 
FuzzyQuery(new Term("field" + VALUE_AND_PATH_SUFFIX, "foo"), 2, 1, 50, true), + new FuzzyQuery(new Term("field" + VALUE_AND_PATH_SUFFIX, "field.foo"), 2, 1, 50, true, MultiTermQuery.DOC_VALUES_REWRITE) + ); + assertEquals(expected, ft.fuzzyQuery("foo", Fuzziness.fromEdits(2), 1, 50, true, null, MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } + + // 2.test isSearchable=true, hasDocValues=false, mappedFieldTypeName=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + null, + true, + false + ); + Query expected = new FuzzyQuery(new Term("field" + VALUE_SUFFIX, "foo"), 2, 1, 50, true); + assertEquals(expected, ft.fuzzyQuery("foo", Fuzziness.fromEdits(2), 1, 50, true, null, MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } + + // test isSearchable=true, hasDocValues=false, mappedFieldTypeName!=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + "field", + true, + false + ); + Query expected = new FuzzyQuery(new Term("field" + VALUE_AND_PATH_SUFFIX, "foo"), 2, 1, 50, true); + assertEquals(expected, ft.fuzzyQuery("foo", Fuzziness.fromEdits(2), 1, 50, true, null, MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } + + // 3.test isSearchable=false, hasDocValues=true, mappedFieldTypeName=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + null, + false, + true + ); + Query expected = new FuzzyQuery( + new Term("field" + VALUE_SUFFIX, "field.foo"), + 2, + 1, + 50, + true, + MultiTermQuery.DOC_VALUES_REWRITE + ); + assertEquals(expected, ft.fuzzyQuery("foo", Fuzziness.fromEdits(2), 1, 50, true, null, MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } + + // test isSearchable=false, hasDocValues=true, mappedFieldTypeName!=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + "field", + false, + true + ); + Query expected = new FuzzyQuery( + new Term("field" + VALUE_AND_PATH_SUFFIX, "field.foo"), + 2, + 1, + 50, + true, + MultiTermQuery.DOC_VALUES_REWRITE + ); + assertEquals(expected, ft.fuzzyQuery("foo", Fuzziness.fromEdits(2), 1, 50, true, null, MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } + + // 4.test isSearchable=false, hasDocValues=false, mappedFieldTypeName=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + null, + false, + false + ); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> ft.fuzzyQuery("foo", Fuzziness.fromEdits(2), 1, 50, true, null, MOCK_QSC_ENABLE_INDEX_DOC_VALUES) + ); + assertEquals( + "Cannot search on field [field._value] since it is both not indexed, and does not have doc_values " + "enabled.", + e.getMessage() + ); + } + + // test isSearchable=false, hasDocValues=false, mappedFieldTypeName!=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + "field", + false, + false + ); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> ft.fuzzyQuery("foo", Fuzziness.fromEdits(2), 1, 50, true, null, MOCK_QSC_ENABLE_INDEX_DOC_VALUES) + ); + assertEquals( + "Cannot search on field [field._valueAndPath] since it is both not indexed, and does not have doc_values " + "enabled.", + e.getMessage() + ); + } + } + + public void testRangeQuery() { + // 1.test 
isSearchable=true, hasDocValues=true, mappedFieldTypeName=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + null, + true, + true + ); + Query expected = new IndexOrDocValuesQuery( + new TermRangeQuery("field" + VALUE_SUFFIX, new BytesRef("2"), new BytesRef("10"), true, true), + new TermRangeQuery( + "field" + VALUE_SUFFIX, + new BytesRef("field.2"), + new BytesRef("field.10"), + true, + true, + DOC_VALUES_REWRITE + ) + ); + assertEquals(expected, ft.rangeQuery(new BytesRef("2"), new BytesRef("10"), true, true, MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } + + // test isSearchable=true, hasDocValues=true, mappedFieldTypeName!=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + "field", + true, + true + ); + Query expected = new IndexOrDocValuesQuery( + new TermRangeQuery("field" + VALUE_AND_PATH_SUFFIX, new BytesRef("2"), new BytesRef("10"), true, true), + new TermRangeQuery( + "field" + VALUE_AND_PATH_SUFFIX, + new BytesRef("field.2"), + new BytesRef("field.10"), + true, + true, + DOC_VALUES_REWRITE + ) + ); + assertEquals(expected, ft.rangeQuery(new BytesRef("2"), new BytesRef("10"), true, true, MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } + + // 2.test isSearchable=true, hasDocValues=false, mappedFieldTypeName=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + null, + true, + false + ); + Query expected = new TermRangeQuery("field" + VALUE_SUFFIX, new BytesRef("2"), new BytesRef("10"), true, true); + assertEquals(expected, ft.rangeQuery(new BytesRef("2"), new BytesRef("10"), true, true, MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } + + // test isSearchable=true, hasDocValues=false, mappedFieldTypeName!=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + "field", + true, + false + ); + Query expected = new TermRangeQuery("field" + VALUE_AND_PATH_SUFFIX, new BytesRef("2"), new BytesRef("10"), true, true); + assertEquals(expected, ft.rangeQuery(new BytesRef("2"), new BytesRef("10"), true, true, MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } + + // 3.test isSearchable=false, hasDocValues=true, mappedFieldTypeName=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + null, + false, + true + ); + Query expected = new TermRangeQuery( + "field" + VALUE_SUFFIX, + new BytesRef("field.2"), + new BytesRef("field.10"), + true, + true, + DOC_VALUES_REWRITE + ); + assertEquals(expected, ft.rangeQuery(new BytesRef("2"), new BytesRef("10"), true, true, MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } + + // test isSearchable=false, hasDocValues=true, mappedFieldTypeName!=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + "field", + false, + true + ); + Query expected = new TermRangeQuery( + "field" + VALUE_AND_PATH_SUFFIX, + new BytesRef("field.2"), + new BytesRef("field.10"), + true, + true, + DOC_VALUES_REWRITE + ); + assertEquals(expected, ft.rangeQuery(new BytesRef("2"), new BytesRef("10"), true, true, MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } + + // 4.test isSearchable=false, hasDocValues=false, mappedFieldTypeName=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) 
getFlatParentFieldType( + "field", + null, + false, + false + ); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> ft.rangeQuery(new BytesRef("2"), new BytesRef("10"), true, true, MOCK_QSC_ENABLE_INDEX_DOC_VALUES) + ); + assertEquals( + "Cannot search on field [field._value] since it is both not indexed, and does not have doc_values " + "enabled.", + e.getMessage() + ); + } + + // test isSearchable=false, hasDocValues=false, mappedFieldTypeName!=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + "field", + false, + false + ); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> ft.rangeQuery(new BytesRef("2"), new BytesRef("10"), true, true, MOCK_QSC_ENABLE_INDEX_DOC_VALUES) + ); + assertEquals( + "Cannot search on field [field._valueAndPath] since it is both not indexed, and does not have doc_values " + "enabled.", + e.getMessage() + ); + } + } + + public void testWildcardQuery() { + // 1.test isSearchable=true, hasDocValues=true, mappedFieldTypeName=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + null, + true, + true + ); + Query expected = new IndexOrDocValuesQuery( + new WildcardQuery( + new Term("field" + VALUE_SUFFIX, "foo*"), + Operations.DEFAULT_DETERMINIZE_WORK_LIMIT, + MultiTermQuery.CONSTANT_SCORE_REWRITE + ), + new WildcardQuery( + new Term("field" + VALUE_SUFFIX, "field.foo*"), + Operations.DEFAULT_DETERMINIZE_WORK_LIMIT, + MultiTermQuery.DOC_VALUES_REWRITE + ) + ); + assertEquals(expected, ft.wildcardQuery("foo*", null, false, MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } + + // test isSearchable=true, hasDocValues=true, mappedFieldTypeName!=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + "field", + true, + true + ); + Query expected = new IndexOrDocValuesQuery( + new WildcardQuery( + new Term("field" + VALUE_AND_PATH_SUFFIX, "foo*"), + Operations.DEFAULT_DETERMINIZE_WORK_LIMIT, + MultiTermQuery.CONSTANT_SCORE_REWRITE + ), + new WildcardQuery( + new Term("field" + VALUE_AND_PATH_SUFFIX, "field.foo*"), + Operations.DEFAULT_DETERMINIZE_WORK_LIMIT, + MultiTermQuery.DOC_VALUES_REWRITE + ) + ); + assertEquals(expected, ft.wildcardQuery("foo*", null, false, MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } + + // 2.test isSearchable=true, hasDocValues=false, mappedFieldTypeName=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + null, + true, + false + ); + Query expected = new WildcardQuery( + new Term("field" + VALUE_SUFFIX, "foo*"), + Operations.DEFAULT_DETERMINIZE_WORK_LIMIT, + MultiTermQuery.CONSTANT_SCORE_REWRITE + ); + assertEquals(expected, ft.wildcardQuery("foo*", null, false, MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } + + // test isSearchable=true, hasDocValues=false, mappedFieldTypeName!=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + "field", + true, + false + ); + Query expected = new WildcardQuery( + new Term("field" + VALUE_AND_PATH_SUFFIX, "foo*"), + Operations.DEFAULT_DETERMINIZE_WORK_LIMIT, + MultiTermQuery.CONSTANT_SCORE_REWRITE + ); + assertEquals(expected, ft.wildcardQuery("foo*", null, false, MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } + + // 3.test isSearchable=false, 
hasDocValues=true, mappedFieldTypeName=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + null, + false, + true + ); + Query expected = new WildcardQuery( + new Term("field" + VALUE_SUFFIX, "field.foo*"), + Operations.DEFAULT_DETERMINIZE_WORK_LIMIT, + MultiTermQuery.DOC_VALUES_REWRITE + ); + assertEquals(expected, ft.wildcardQuery("foo*", null, false, MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } + + // test isSearchable=false, hasDocValues=true, mappedFieldTypeName!=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + "field", + false, + true + ); + Query expected = new WildcardQuery( + new Term("field" + VALUE_AND_PATH_SUFFIX, "field.foo*"), + Operations.DEFAULT_DETERMINIZE_WORK_LIMIT, + MultiTermQuery.DOC_VALUES_REWRITE + ); + assertEquals(expected, ft.wildcardQuery("foo*", null, false, MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } + + // 4.test isSearchable=false, hasDocValues=false, mappedFieldTypeName=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + null, + false, + false + ); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> ft.wildcardQuery("foo*", null, false, MOCK_QSC_ENABLE_INDEX_DOC_VALUES) + ); + assertEquals( + "Cannot search on field [field._value] since it is both not indexed, and does not have doc_values " + "enabled.", + e.getMessage() + ); + } + + // test isSearchable=false, hasDocValues=false, mappedFieldTypeName!=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + "field", + false, + false + ); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> ft.wildcardQuery("foo*", null, false, MOCK_QSC_ENABLE_INDEX_DOC_VALUES) + ); + assertEquals( + "Cannot search on field [field._valueAndPath] since it is both not indexed, and does not have doc_values " + "enabled.", + e.getMessage() + ); } } } From 6c17119e1051aa9db339454df32aec0c18c52499 Mon Sep 17 00:00:00 2001 From: Michael Froh Date: Mon, 14 Oct 2024 16:41:17 -0700 Subject: [PATCH 16/19] Add new benchmark config for query approximation (#16323) We have guarded the experimental query approximation framework behind a feature flag. In order to easily measure the impact of approximation on big5 benchmarks, it would be nice to have a benchmark config. Signed-off-by: Michael Froh --- .github/benchmark-configs.json | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/.github/benchmark-configs.json b/.github/benchmark-configs.json index 27b7228e1203a..732f2f9b96ae3 100644 --- a/.github/benchmark-configs.json +++ b/.github/benchmark-configs.json @@ -221,5 +221,23 @@ "data_instance_config": "4vCPU, 32G Mem, 16G Heap" }, "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline" + }, + "id_14": { + "description": "Search only test-procedure for big5, uses snapshot to restore the data for OS-3.0.0. 
Enables range query approximation.", + "supported_major_versions": ["3"], + "cluster-benchmark-configs": { + "SINGLE_NODE_CLUSTER": "true", + "MIN_DISTRIBUTION": "true", + "TEST_WORKLOAD": "big5", + "ADDITIONAL_CONFIG": "opensearch.experimental.feature.approximate_point_range_query.enabled:true", + "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo-300\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots-300\",\"snapshot_name\":\"big5_1_shard_ordered\"}", + "CAPTURE_NODE_STAT": "true", + "TEST_PROCEDURE": "restore-from-snapshot" + }, + "cluster_configuration": { + "size": "Single-Node", + "data_instance_config": "4vCPU, 32G Mem, 16G Heap" + }, + "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline" + } } From a53e0c63aa9c6a85fed30e7bdce8b533aa471060 Mon Sep 17 00:00:00 2001 From: Sooraj Sinha <81695996+soosinha@users.noreply.github.com> Date: Tue, 15 Oct 2024 11:21:12 +0530 Subject: [PATCH 17/19] Update last seen cluster state in commit phase (#16215) * Update last seen cluster state on apply commit Signed-off-by: Sooraj Sinha --- CHANGELOG.md | 1 + .../opensearch/cluster/coordination/Coordinator.java | 8 +++++++- .../coordination/PublicationTransportHandler.java | 10 +++++++--- .../coordination/PublicationTransportHandlerTests.java | 2 +- 4 files changed, 16 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9665133e91207..668846cc1da6a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -62,6 +62,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Enable coordinator search.request_stats_enabled by default ([#16290](https://github.com/opensearch-project/OpenSearch/pull/16290)) - Code cleanup: Remove ApproximateIndexOrDocValuesQuery ([#16273](https://github.com/opensearch-project/OpenSearch/pull/16273)) - Optimise clone operation for incremental full cluster snapshots ([#16296](https://github.com/opensearch-project/OpenSearch/pull/16296)) +- Update last seen cluster state in the commit phase ([#16215](https://github.com/opensearch-project/OpenSearch/pull/16215)) ### Deprecated diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java index 1b3ae89251ac0..02d5f8431f0ad 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java @@ -105,6 +105,7 @@ import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.BiConsumer; +import java.util.function.Consumer; import java.util.function.Supplier; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -383,7 +384,11 @@ void onFollowerCheckRequest(FollowerCheckRequest followerCheckRequest) { } } - private void handleApplyCommit(ApplyCommitRequest applyCommitRequest, ActionListener<Void> applyListener) { + private void handleApplyCommit( + ApplyCommitRequest applyCommitRequest, + Consumer<ClusterState> updateLastSeen, + ActionListener<Void> applyListener + ) { synchronized (mutex) { logger.trace("handleApplyCommit: applying commit {}", applyCommitRequest); @@ -391,6 +396,7 @@ private void handleApplyCommit(ApplyCommitRequest applyCommitRequest, ActionList final ClusterState committedState = hideStateIfNotRecovered(coordinationState.get().getLastAcceptedState()); applierState = mode == Mode.CANDIDATE ?
clusterStateWithNoClusterManagerBlock(committedState) : committedState; clusterApplier.setPreCommitState(applierState); + updateLastSeen.accept(coordinationState.get().getLastAcceptedState()); if (applyCommitRequest.getSourceNode().equals(getLocalNode())) { // cluster-manager node applies the committed state at the end of the publication process, not here. diff --git a/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java b/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java index 42aa55433dd5f..d30efde52bffb 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java @@ -43,6 +43,7 @@ import org.opensearch.cluster.coordination.PersistedStateRegistry.PersistedStateType; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.common.TriConsumer; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; @@ -65,7 +66,6 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; -import java.util.function.BiConsumer; import java.util.function.Consumer; import java.util.function.Function; @@ -110,7 +110,7 @@ public PublicationTransportHandler( TransportService transportService, NamedWriteableRegistry namedWriteableRegistry, Function<PublishRequest, PublishWithJoinResponse> handlePublishRequest, - BiConsumer<ApplyCommitRequest, ActionListener<Void>> handleApplyCommit, + TriConsumer<ApplyCommitRequest, Consumer<ClusterState>, ActionListener<Void>> handleApplyCommit, RemoteClusterStateService remoteClusterStateService ) { this.transportService = transportService; @@ -142,7 +142,7 @@ public PublicationTransportHandler( false, false, ApplyCommitRequest::new, - (request, channel, task) -> handleApplyCommit.accept(request, transportCommitCallback(channel)) + (request, channel, task) -> handleApplyCommit.apply(request, this::updateLastSeen, transportCommitCallback(channel)) ); } @@ -377,6 +377,10 @@ private boolean validateRemotePublicationConfiguredOnAllNodes(DiscoveryNodes dis return true; } + private void updateLastSeen(final ClusterState clusterState) { + lastSeenClusterState.set(clusterState); + } + // package private for testing void setCurrentPublishRequestToSelf(PublishRequest publishRequest) { this.currentPublishRequestToSelf.set(publishRequest); diff --git a/server/src/test/java/org/opensearch/cluster/coordination/PublicationTransportHandlerTests.java b/server/src/test/java/org/opensearch/cluster/coordination/PublicationTransportHandlerTests.java index 266928c919fe2..616559e91536d 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/PublicationTransportHandlerTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/PublicationTransportHandlerTests.java @@ -466,7 +466,7 @@ private PublicationTransportHandler getPublicationTransportHandler( transportService, writableRegistry(), handlePublishRequest, - (pu, l) -> {}, + (pu, uc, l) -> {}, remoteClusterStateService ); transportService.start(); From 35c366ddc794e0600184cf406c06ae65061e28ce Mon Sep 17 00:00:00 2001 From: Gaurav Bafna <85113518+gbbafna@users.noreply.github.com> Date: Tue, 15 Oct 2024 12:20:39 +0530 Subject: [PATCH 18/19] Add support to dynamically resize threadpools size (#16236) Signed-off-by: Gaurav Bafna --- CHANGELOG.md | 1 +
.../cluster/settings/ClusterSettingsIT.java | 108 ++++++++++++++++++ .../common/settings/ClusterSettings.java | 5 +- .../main/java/org/opensearch/node/Node.java | 1 + .../org/opensearch/threadpool/ThreadPool.java | 101 ++++++++++++++++ .../threadpool/ThreadPoolTests.java | 44 +++++++ 6 files changed, 259 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 668846cc1da6a..5bf90a75d0148 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -19,6 +19,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add _list/indices API as paginated alternate to _cat/indices ([#14718](https://github.com/opensearch-project/OpenSearch/pull/14718)) - Add success and failure metrics for async shard fetch ([#15976](https://github.com/opensearch-project/OpenSearch/pull/15976)) - Add new metric REMOTE_STORE to NodeStats API response ([#15611](https://github.com/opensearch-project/OpenSearch/pull/15611)) +- Add support to dynamically resize threadpools size. ([#16236](https://github.com/opensearch-project/OpenSearch/pull/16236)) - [S3 Repository] Change default retry mechanism of s3 clients to Standard Mode ([#15978](https://github.com/opensearch-project/OpenSearch/pull/15978)) - Add changes to block calls in cat shards, indices and segments based on dynamic limit settings ([#15986](https://github.com/opensearch-project/OpenSearch/pull/15986)) - New `phone` & `phone-search` analyzer + tokenizer ([#15915](https://github.com/opensearch-project/OpenSearch/pull/15915)) diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/settings/ClusterSettingsIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/settings/ClusterSettingsIT.java index 541f1048bb246..59542f886dcac 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/settings/ClusterSettingsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/settings/ClusterSettingsIT.java @@ -380,6 +380,114 @@ public void testMissingUnits() { } } + public void testThreadPoolSettings() { + // wrong threadpool + try { + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put("cluster.thread_pool.wrong.max", "-1").build()) + .get(); + fail("bogus value"); + } catch (IllegalArgumentException ex) { + assertTrue(ex.getCause().getMessage().contains("illegal thread_pool name : ")); + } + + // Scaling threadpool - negative value + try { + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put("cluster.thread_pool.snapshot.max", "-1").build()) + .get(); + fail("bogus value"); + } catch (IllegalArgumentException ex) { + assertEquals(ex.getCause().getMessage(), "illegal value for [cluster.thread_pool.snapshot], has to be positive value"); + } + + // Scaling threadpool - Other than max and core + try { + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put("cluster.thread_pool.snapshot.wrong", "-1").build()) + .get(); + fail("bogus value"); + } catch (IllegalArgumentException ex) { + assertEquals(ex.getCause().getMessage(), "illegal thread_pool config : [wrong] should only have [core, max]"); + } + + // Scaling threadpool - core > max + try { + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder().put("cluster.thread_pool.snapshot.core", "2").put("cluster.thread_pool.snapshot.max", "1").build() + ) + .get(); + fail("bogus value"); + } catch (IllegalArgumentException ex) { + 
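+ // validation failures are wrapped by the settings update flow, hence the assertions on ex.getCause() in these tests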
assertEquals(ex.getCause().getMessage(), "core threadpool size cannot be greater than max"); + } + + // Scaling threadpool - Max value lesser than default value of 4 + try { + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put("cluster.thread_pool.generic.max", "1").build()) + .get(); + fail("bogus value"); + } catch (IllegalArgumentException ex) { + assertEquals(ex.getCause().getMessage(), "core threadpool size cannot be greater than max"); + } + + // Scaling threadpool - happy case - transient overrides persistent + ClusterUpdateSettingsResponse clusterUpdateSettingsResponse = client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder().put("cluster.thread_pool.snapshot.core", "2").put("cluster.thread_pool.snapshot.max", "2").build() + ) + .setPersistentSettings(Settings.builder().put("cluster.thread_pool.snapshot.max", "1").build()) + .get(); + assertTrue(clusterUpdateSettingsResponse.isAcknowledged()); + + // Fixed threadpool - Other than size + try { + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put("cluster.thread_pool.get.wrong", "-1").build()) + .get(); + fail("bogus value"); + } catch (IllegalArgumentException ex) { + assertEquals(ex.getCause().getMessage(), "illegal thread_pool config : [wrong] should only have [size]"); + } + + // Fixed threadpool - 0 value + try { + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put("cluster.thread_pool.get.size", "0").build()) + .get(); + fail("bogus value"); + } catch (IllegalArgumentException ex) { + assertEquals(ex.getCause().getMessage(), "illegal value for [cluster.thread_pool.get], has to be positive value"); + } + + // Fixed threadpool - happy case + clusterUpdateSettingsResponse = client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put("cluster.thread_pool.get.size", "1").build()) + .setPersistentSettings(Settings.builder().put("cluster.thread_pool.get.size", "1").build()) + .get(); + assertTrue(clusterUpdateSettingsResponse.isAcknowledged()); + } + public void testLoggerLevelUpdate() { assertAcked(prepareCreate("test")); diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index a84a29256ee19..f769f8729c25b 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -805,7 +805,10 @@ public void apply(Settings value, Settings current, Settings previous) { // Settings to be used for limiting rest requests ResponseLimitSettings.CAT_INDICES_RESPONSE_LIMIT_SETTING, ResponseLimitSettings.CAT_SHARDS_RESPONSE_LIMIT_SETTING, - ResponseLimitSettings.CAT_SEGMENTS_RESPONSE_LIMIT_SETTING + ResponseLimitSettings.CAT_SEGMENTS_RESPONSE_LIMIT_SETTING, + + // Thread pool Settings + ThreadPool.CLUSTER_THREAD_POOL_SIZE_SETTING ) ) ); diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 584d95b9ff6b5..e74fca60b0201 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -624,6 +624,7 @@ protected Node( additionalSettingsFilter, settingsUpgraders ); + threadPool.registerClusterSettingsListeners(settingsModule.getClusterSettings()); 
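The core/max ordering that the ThreadPool.java hunk below must respect comes from java.util.concurrent.ThreadPoolExecutor itself: the maximum pool size may never drop below the current core size, and recent JDKs likewise reject a core size above the current maximum. A minimal standalone sketch of the safe ordering, assuming plain JDK behavior and made-up pool sizes:

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class ResizeOrderSketch {
    public static void main(String[] args) {
        ThreadPoolExecutor pool = new ThreadPoolExecutor(4, 8, 30, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
        // Shrinking to core=1/max=2: core goes down first, because calling
        // setMaximumPoolSize(2) while core is still 4 throws IllegalArgumentException.
        pool.setCorePoolSize(1);
        pool.setMaximumPoolSize(2);
        // Growing back to core=4/max=8: max goes up first, so that core never
        // exceeds max at any intermediate step.
        pool.setMaximumPoolSize(8);
        pool.setCorePoolSize(4);
        pool.shutdown();
    }
}

This is exactly the branch structure setThreadPool uses further down for both scaling and fixed pools.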
scriptModule.registerClusterSettingsListeners(scriptService, settingsModule.getClusterSettings()); final NetworkService networkService = new NetworkService( getCustomNameResolvers(pluginsService.filterPlugins(DiscoveryPlugin.class)) diff --git a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java index e0b15e54f6e2e..269a4c87dfb72 100644 --- a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java +++ b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java @@ -38,6 +38,7 @@ import org.opensearch.Version; import org.opensearch.common.Nullable; import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.SizeValue; @@ -58,11 +59,14 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.Executor; import java.util.concurrent.ExecutorService; import java.util.concurrent.RejectedExecutionException; @@ -122,6 +126,9 @@ public static class Names { public static final String REMOTE_STATE_CHECKSUM = "remote_state_checksum"; } + static Set<String> scalingThreadPoolKeys = new HashSet<>(Arrays.asList("max", "core")); + static Set<String> fixedThreadPoolKeys = new HashSet<>(Arrays.asList("size")); + /** * The threadpool type. * @@ -222,6 +229,12 @@ public Collection<ExecutorBuilder> builders() { Setting.Property.NodeScope ); + public static final Setting<Settings> CLUSTER_THREAD_POOL_SIZE_SETTING = Setting.groupSetting( + "cluster.thread_pool.", + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + public ThreadPool(final Settings settings, final ExecutorBuilder<?>...
customBuilders) { this(settings, null, customBuilders); } @@ -403,6 +416,94 @@ public Info info(String name) { return holder.info; } + public void registerClusterSettingsListeners(ClusterSettings clusterSettings) { + clusterSettings.addSettingsUpdateConsumer(CLUSTER_THREAD_POOL_SIZE_SETTING, this::setThreadPool, this::validateSetting); + } + + /* + Scaling threadpools can provide only max and core + Fixed/ResizableQueue threadpools can provide only size + + For example, valid settings for a scaling and a fixed thread pool would be + cluster.thread_pool.snapshot.max : "5", + cluster.thread_pool.snapshot.core : "5", + cluster.thread_pool.get.size : "2", + */ + private void validateSetting(Settings tpSettings) { + Map<String, Settings> tpGroups = tpSettings.getAsGroups(); + for (Map.Entry<String, Settings> entry : tpGroups.entrySet()) { + String tpName = entry.getKey(); + if (THREAD_POOL_TYPES.containsKey(tpName) == false) { + throw new IllegalArgumentException("illegal thread_pool name : " + tpName); + } + Settings tpGroup = entry.getValue(); + ExecutorHolder holder = executors.get(tpName); + assert holder.executor instanceof OpenSearchThreadPoolExecutor; + OpenSearchThreadPoolExecutor threadPoolExecutor = (OpenSearchThreadPoolExecutor) holder.executor; + if (holder.info.type == ThreadPoolType.SCALING) { + if (scalingThreadPoolKeys.containsAll(tpGroup.keySet()) == false) { + throw new IllegalArgumentException( + "illegal thread_pool config : " + tpGroup.keySet() + " should only have " + scalingThreadPoolKeys + ); + } + int max = tpGroup.getAsInt("max", threadPoolExecutor.getMaximumPoolSize()); + int core = tpGroup.getAsInt("core", threadPoolExecutor.getCorePoolSize()); + if (core < 1 || max < 1) { + throw new IllegalArgumentException("illegal value for [cluster.thread_pool." + tpName + "], has to be positive value"); + } else if (core > max) { + throw new IllegalArgumentException("core threadpool size cannot be greater than max"); + } + } else { + if (fixedThreadPoolKeys.containsAll(tpGroup.keySet()) == false) { + throw new IllegalArgumentException( + "illegal thread_pool config : " + tpGroup.keySet() + " should only have " + fixedThreadPoolKeys + ); + } + int size = tpGroup.getAsInt("size", threadPoolExecutor.getMaximumPoolSize()); + if (size < 1) { + throw new IllegalArgumentException("illegal value for [cluster.thread_pool." + tpName + "], has to be positive value"); + } + } + } + } + + public void setThreadPool(Settings tpSettings) { + Map<String, Settings> tpGroups = tpSettings.getAsGroups(); + for (Map.Entry<String, Settings> entry : tpGroups.entrySet()) { + String tpName = entry.getKey(); + Settings tpGroup = entry.getValue(); + ExecutorHolder holder = executors.get(tpName); + assert holder.executor instanceof OpenSearchThreadPoolExecutor; + OpenSearchThreadPoolExecutor executor = (OpenSearchThreadPoolExecutor) holder.executor; + if (holder.info.type == ThreadPoolType.SCALING) { + int max = tpGroup.getAsInt("max", executor.getMaximumPoolSize()); + int core = tpGroup.getAsInt("core", executor.getCorePoolSize()); + /* + If we are decreasing, core pool size has to be decreased first. + If we are increasing, max pool size has to be increased first. + This ensures that core pool size is never larger than max pool size.
+ Otherwise IllegalArgumentException will be thrown from ThreadPoolExecutor + */ + if (core < executor.getCorePoolSize()) { + executor.setCorePoolSize(core); + executor.setMaximumPoolSize(max); + } else { + executor.setMaximumPoolSize(max); + executor.setCorePoolSize(core); + } + } else { + int size = tpGroup.getAsInt("size", executor.getMaximumPoolSize()); + if (size < executor.getCorePoolSize()) { + executor.setCorePoolSize(size); + executor.setMaximumPoolSize(size); + } else { + executor.setMaximumPoolSize(size); + executor.setCorePoolSize(size); + } + } + } + } + public ThreadPoolStats stats() { List<ThreadPoolStats.Stats> stats = new ArrayList<>(); for (ExecutorHolder holder : executors.values()) { diff --git a/server/src/test/java/org/opensearch/threadpool/ThreadPoolTests.java b/server/src/test/java/org/opensearch/threadpool/ThreadPoolTests.java index 658de5ec49500..205bf7621c576 100644 --- a/server/src/test/java/org/opensearch/threadpool/ThreadPoolTests.java +++ b/server/src/test/java/org/opensearch/threadpool/ThreadPoolTests.java @@ -36,6 +36,7 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.FutureUtils; import org.opensearch.common.util.concurrent.OpenSearchExecutors; +import org.opensearch.common.util.concurrent.OpenSearchThreadPoolExecutor; import org.opensearch.test.OpenSearchTestCase; import java.util.concurrent.CountDownLatch; @@ -152,4 +153,47 @@ public void testInheritContextOnSchedule() throws InterruptedException { terminate(threadPool); } } + + public void testThreadPoolResize() { + TestThreadPool threadPool = new TestThreadPool("test"); + try { + // increase it + Settings commonSettings = Settings.builder().put("snapshot.max", "10").put("snapshot.core", "2").put("get.size", "100").build(); + threadPool.setThreadPool(commonSettings); + ExecutorService executorService = threadPool.executor("snapshot"); + OpenSearchThreadPoolExecutor executor = (OpenSearchThreadPoolExecutor) executorService; + assertEquals(10, executor.getMaximumPoolSize()); + assertEquals(2, executor.getCorePoolSize()); + + executorService = threadPool.executor("get"); + executor = (OpenSearchThreadPoolExecutor) executorService; + assertEquals(100, executor.getMaximumPoolSize()); + assertEquals(100, executor.getCorePoolSize()); + + // decrease it + commonSettings = Settings.builder().put("snapshot.max", "2").put("snapshot.core", "1").put("get.size", "90").build(); + threadPool.setThreadPool(commonSettings); + executorService = threadPool.executor("snapshot"); + executor = (OpenSearchThreadPoolExecutor) executorService; + assertEquals(2, executor.getMaximumPoolSize()); + assertEquals(1, executor.getCorePoolSize()); + + executorService = threadPool.executor("get"); + executor = (OpenSearchThreadPoolExecutor) executorService; + assertEquals(90, executor.getMaximumPoolSize()); + assertEquals(90, executor.getCorePoolSize()); + } finally { + terminate(threadPool); + } + } + + public void testThreadPoolResizeFail() { + TestThreadPool threadPool = new TestThreadPool("test"); + try { + Settings commonSettings = Settings.builder().put("snapshot.max", "50").put("snapshot.core", "100").build(); + assertThrows(IllegalArgumentException.class, () -> threadPool.setThreadPool(commonSettings)); + } finally { + terminate(threadPool); + } + } } From 23d1c7a55a63250b962c1fad4e6fb962fdd156cc Mon Sep 17 00:00:00 2001 From: Sachin Kale Date: Tue, 15 Oct 2024 21:13:33 +0530 Subject: [PATCH 19/19] Fix deletion permits flow in RemoteFsTimestampAwareTranslog (#16282) --------- Signed-off-by: Sachin Kale ---
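Before the file-level diff, a stripped-down model of the permit discipline this patch enforces (hypothetical names, plain java.util.concurrent; not the actual OpenSearch translog classes): every path through the trim flow, success or failure, must hand back exactly the permits it took.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Semaphore;

public class PermitDisciplineSketch {
    static final int REMOTE_DELETION_PERMITS = 2;
    final Semaphore permits = new Semaphore(REMOTE_DELETION_PERMITS);

    // Takes one permit for stale-generation deletion and one for stale-metadata deletion.
    void trimStaleFiles() {
        if (permits.tryAcquire(REMOTE_DELETION_PERMITS) == false) {
            return; // a previous trim is still in flight
        }
        try {
            deleteAsync(permits::release); // generation permit returned by the callback
        } catch (Exception e) {
            // deleteAsync ran its callback before rethrowing, so the generation permit
            // is already back; release the one reserved for metadata and stop.
            permits.release();
            return;
        }
        try {
            deleteAsync(permits::release); // metadata permit returned by the callback
        } catch (Exception e) {
            // the helper already ran onCompletion, so nothing further is owed here
        }
    }

    // Mirrors the TranslogTransferManager change: if scheduling the async deletion
    // fails, run onCompletion anyway so the caller's permit is paid back.
    void deleteAsync(Runnable onCompletion) {
        try {
            CompletableFuture.runAsync(() -> {}).whenComplete((r, t) -> onCompletion.run());
        } catch (Exception e) {
            onCompletion.run();
            throw e;
        }
    }
}

The diff below applies the same two rules to trimUnreferencedReaders and the transfer manager, and adds asserts that the semaphore is back at REMOTE_DELETION_PERMITS on every failure path.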
.../RemoteFsTimestampAwareTranslog.java | 52 ++++- .../index/translog/RemoteFsTranslog.java | 9 +- .../transfer/TranslogTransferManager.java | 84 +++---- .../RemoteFsTimestampAwareTranslogTests.java | 213 +++++++++++++++++- .../index/translog/RemoteFsTranslogTests.java | 24 ++ 5 files changed, 317 insertions(+), 65 deletions(-) diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslog.java b/server/src/main/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslog.java index 3ccacde22bbfc..1f54c09a04cc7 100644 --- a/server/src/main/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslog.java +++ b/server/src/main/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslog.java @@ -215,21 +215,42 @@ public void onResponse(List blobMetadata) { logger.debug(() -> "generationsToBeDeleted = " + generationsToBeDeleted); if (generationsToBeDeleted.isEmpty() == false) { // Delete stale generations - translogTransferManager.deleteGenerationAsync( - primaryTermSupplier.getAsLong(), - generationsToBeDeleted, - remoteGenerationDeletionPermits::release - ); + try { + translogTransferManager.deleteGenerationAsync( + primaryTermSupplier.getAsLong(), + generationsToBeDeleted, + remoteGenerationDeletionPermits::release + ); + } catch (Exception e) { + logger.error("Exception in delete generations flow", e); + // Release permit that is meant for metadata files and return + remoteGenerationDeletionPermits.release(); + assert remoteGenerationDeletionPermits.availablePermits() == REMOTE_DELETION_PERMITS : "Available permits " + + remoteGenerationDeletionPermits.availablePermits() + + " is not equal to " + + REMOTE_DELETION_PERMITS; + return; + } } else { remoteGenerationDeletionPermits.release(); } if (metadataFilesToBeDeleted.isEmpty() == false) { // Delete stale metadata files - translogTransferManager.deleteMetadataFilesAsync( - metadataFilesToBeDeleted, - remoteGenerationDeletionPermits::release - ); + try { + translogTransferManager.deleteMetadataFilesAsync( + metadataFilesToBeDeleted, + remoteGenerationDeletionPermits::release + ); + } catch (Exception e) { + logger.error("Exception in delete metadata files flow", e); + // Permits is already released by deleteMetadataFilesAsync + assert remoteGenerationDeletionPermits.availablePermits() == REMOTE_DELETION_PERMITS : "Available permits " + + remoteGenerationDeletionPermits.availablePermits() + + " is not equal to " + + REMOTE_DELETION_PERMITS; + return; + } // Update cache to keep only those metadata files that are not getting deleted oldFormatMetadataFileGenerationMap.keySet().retainAll(metadataFilesNotToBeDeleted); @@ -240,7 +261,12 @@ public void onResponse(List blobMetadata) { remoteGenerationDeletionPermits.release(); } } catch (Exception e) { + logger.error("Exception in trimUnreferencedReaders", e); remoteGenerationDeletionPermits.release(REMOTE_DELETION_PERMITS); + assert remoteGenerationDeletionPermits.availablePermits() == REMOTE_DELETION_PERMITS : "Available permits " + + remoteGenerationDeletionPermits.availablePermits() + + " is not equal to " + + REMOTE_DELETION_PERMITS; } } @@ -441,7 +467,8 @@ protected static void deleteStaleRemotePrimaryTerms( } Optional minPrimaryTermFromMetadataFiles = metadataFilesNotToBeDeleted.stream().map(file -> { try { - return getMinMaxPrimaryTermFromMetadataFile(file, translogTransferManager, oldFormatMetadataFilePrimaryTermMap).v1(); + return getMinMaxPrimaryTermFromMetadataFile(file, translogTransferManager, oldFormatMetadataFilePrimaryTermMap, 
logger) + .v1(); } catch (IOException e) { return Long.MIN_VALUE; } @@ -482,7 +509,8 @@ protected static Long getMinPrimaryTermInRemote( protected static Tuple getMinMaxPrimaryTermFromMetadataFile( String metadataFile, TranslogTransferManager translogTransferManager, - Map> oldFormatMetadataFilePrimaryTermMap + Map> oldFormatMetadataFilePrimaryTermMap, + Logger logger ) throws IOException { Tuple minMaxPrimaryTermFromFileName = TranslogTransferMetadata.getMinMaxPrimaryTermFromFilename(metadataFile); if (minMaxPrimaryTermFromFileName != null) { @@ -504,6 +532,8 @@ protected static Tuple getMinMaxPrimaryTermFromMetadataFile( if (primaryTerm.isPresent()) { minPrimaryTem = primaryTerm.get(); } + } else { + logger.warn("No primary term found from GenerationToPrimaryTermMap for file [{}]", metadataFile); } Tuple minMaxPrimaryTermTuple = new Tuple<>(minPrimaryTem, maxPrimaryTem); oldFormatMetadataFilePrimaryTermMap.put(metadataFile, minMaxPrimaryTermTuple); diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java index f5a9ed8ed9362..e697e16d5e8a0 100644 --- a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java +++ b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java @@ -590,7 +590,14 @@ protected void trimUnreferencedReaders(boolean onlyTrimLocal) throws IOException generationsToDelete.add(generation); } if (generationsToDelete.isEmpty() == false) { - deleteRemoteGeneration(generationsToDelete); + try { + deleteRemoteGeneration(generationsToDelete); + } catch (Exception e) { + logger.error("Exception in delete generations flow", e); + // Release permit that is meant for metadata files and return + remoteGenerationDeletionPermits.release(); + return; + } translogTransferManager.deleteStaleTranslogMetadataFilesAsync(remoteGenerationDeletionPermits::release); deleteStaleRemotePrimaryTerms(); } else { diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java index 291218ea47499..924669d0e46a9 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java @@ -496,19 +496,24 @@ public byte[] getMetadataBytes(TranslogTransferMetadata metadata) throws IOExcep * @param onCompletion runnable to run on completion of deletion regardless of success/failure. 
diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java
index 291218ea47499..924669d0e46a9 100644
--- a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java
+++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java
@@ -496,19 +496,24 @@ public byte[] getMetadataBytes(TranslogTransferMetadata metadata) throws IOExcep
      * @param onCompletion runnable to run on completion of deletion regardless of success/failure.
      */
     public void deleteGenerationAsync(long primaryTerm, Set<Long> generations, Runnable onCompletion) {
-        List<String> translogFiles = new ArrayList<>();
-        generations.forEach(generation -> {
-            // Add .ckp and .tlog file to translog file list which is located in basePath/<primaryTerm>
-            String ckpFileName = Translog.getCommitCheckpointFileName(generation);
-            String translogFileName = Translog.getFilename(generation);
-            if (isTranslogMetadataEnabled == false) {
-                translogFiles.addAll(List.of(ckpFileName, translogFileName));
-            } else {
-                translogFiles.add(translogFileName);
-            }
-        });
-        // Delete the translog and checkpoint files asynchronously
-        deleteTranslogFilesAsync(primaryTerm, translogFiles, onCompletion);
+        try {
+            List<String> translogFiles = new ArrayList<>();
+            generations.forEach(generation -> {
+                // Add .ckp and .tlog file to translog file list which is located in basePath/<primaryTerm>
+                String ckpFileName = Translog.getCommitCheckpointFileName(generation);
+                String translogFileName = Translog.getFilename(generation);
+                if (isTranslogMetadataEnabled == false) {
+                    translogFiles.addAll(List.of(ckpFileName, translogFileName));
+                } else {
+                    translogFiles.add(translogFileName);
+                }
+            });
+            // Delete the translog and checkpoint files asynchronously
+            deleteTranslogFilesAsync(primaryTerm, translogFiles, onCompletion);
+        } catch (Exception e) {
+            onCompletion.run();
+            throw e;
+        }
     }

     /**
@@ -658,37 +663,32 @@ public void deleteTranslogFiles() throws IOException {
      * @param onCompletion runnable to run on completion of deletion regardless of success/failure.
      */
     private void deleteTranslogFilesAsync(long primaryTerm, List<String> files, Runnable onCompletion) {
-        try {
-            transferService.deleteBlobsAsync(
-                ThreadPool.Names.REMOTE_PURGE,
-                remoteDataTransferPath.add(String.valueOf(primaryTerm)),
-                files,
-                new ActionListener<>() {
-                    @Override
-                    public void onResponse(Void unused) {
-                        fileTransferTracker.delete(files);
-                        logger.trace("Deleted translogs for primaryTerm={} files={}", primaryTerm, files);
-                        onCompletion.run();
-                    }
+        transferService.deleteBlobsAsync(
+            ThreadPool.Names.REMOTE_PURGE,
+            remoteDataTransferPath.add(String.valueOf(primaryTerm)),
+            files,
+            new ActionListener<>() {
+                @Override
+                public void onResponse(Void unused) {
+                    fileTransferTracker.delete(files);
+                    logger.trace("Deleted translogs for primaryTerm={} files={}", primaryTerm, files);
+                    onCompletion.run();
+                }

-                    @Override
-                    public void onFailure(Exception e) {
-                        onCompletion.run();
-                        logger.error(
-                            () -> new ParameterizedMessage(
-                                "Exception occurred while deleting translog for primaryTerm={} files={}",
-                                primaryTerm,
-                                files
-                            ),
-                            e
-                        );
-                    }
+                @Override
+                public void onFailure(Exception e) {
+                    onCompletion.run();
+                    logger.error(
+                        () -> new ParameterizedMessage(
+                            "Exception occurred while deleting translog for primaryTerm={} files={}",
+                            primaryTerm,
+                            files
+                        ),
+                        e
+                    );
                 }
-            );
-        } catch (Exception e) {
-            onCompletion.run();
-            throw e;
-        }
+            }
+        );
     }

     /**
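The restructuring above moves the synchronous try/catch out of deleteTranslogFilesAsync and into deleteGenerationAsync, so the onCompletion callback still runs exactly once on every path: successful submission, synchronous throw, or asynchronous failure. A hedged way to pin that contract down (the Deleter interface and fixture below are hypothetical stand-ins, not the suite's real types):

    import java.util.Set;
    import java.util.concurrent.atomic.AtomicInteger;

    // Hypothetical sketch: a counter proves the completion callback fires
    // exactly once even when the delete call throws before any async work
    // is submitted, mirroring the deleteGenerationAsync contract above.
    public class CompletionContractSketch {
        interface Deleter {
            void deleteGenerationAsync(long primaryTerm, Set<Long> generations, Runnable onCompletion);
        }

        public static void main(String[] args) {
            Deleter failing = (term, gens, onCompletion) -> {
                try {
                    throw new RuntimeException("repository unavailable");
                } catch (RuntimeException e) {
                    onCompletion.run(); // settle the callback, then propagate
                    throw e;
                }
            };

            AtomicInteger completions = new AtomicInteger();
            try {
                failing.deleteGenerationAsync(1L, Set.of(1L, 2L), completions::incrementAndGet);
            } catch (RuntimeException expected) {
                // the caller now only needs to release the other branch's permit
            }
            if (completions.get() != 1) {
                throw new AssertionError("onCompletion must run exactly once, ran " + completions.get());
            }
        }
    }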
diff --git a/server/src/test/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslogTests.java b/server/src/test/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslogTests.java
index 73db3314f4d1e..0995f2e75a17a 100644
--- a/server/src/test/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslogTests.java
+++ b/server/src/test/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslogTests.java
@@ -63,6 +63,7 @@

 import org.mockito.Mockito;

+import static org.opensearch.index.translog.RemoteFsTranslog.REMOTE_DELETION_PERMITS;
 import static org.opensearch.index.translog.TranslogDeletionPolicies.createTranslogDeletionPolicy;
 import static org.opensearch.index.translog.transfer.TranslogTransferMetadata.METADATA_SEPARATOR;
 import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED;
@@ -480,10 +481,7 @@ public void onResponse(List<BlobMetadata> blobMetadataList) {
         // we will not delete them
         if (dataFilesAfterTrim.equals(dataFilesBeforeTrim) == false) {
             // We check for number of pinned timestamp or +1 due to latest metadata.
-            assertTrue(
-                metadataFilesAfterTrim.size() == pinnedTimestamps.size()
-                    || metadataFilesAfterTrim.size() == pinnedTimestamps.size() + 1
-            );
+            assertTrue(metadataFilesAfterTrim.size() >= pinnedTimestamps.size());
         }

         for (String md : pinnedTimestampMatchingMetadataFiles) {
@@ -1061,15 +1059,14 @@ public void testGetMinMaxTranslogGenerationFromMetadataFile() throws IOException

     public void testGetMinMaxPrimaryTermFromMetadataFile() throws IOException {
         TranslogTransferManager translogTransferManager = mock(TranslogTransferManager.class);
-        RemoteFsTimestampAwareTranslog translog = (RemoteFsTimestampAwareTranslog) this.translog;
-
         // Fetch generations directly from the filename
         assertEquals(
             new Tuple<>(1L, 1008L),
             RemoteFsTimestampAwareTranslog.getMinMaxPrimaryTermFromMetadataFile(
                 "metadata__9223372036854774799__9223372036854774799__9223370311919910393__31__9223372036854775106__1__1",
                 translogTransferManager,
-                new HashMap<>()
+                new HashMap<>(),
+                logger
             )
         );
         assertEquals(
@@ -1077,7 +1074,8 @@ public void testGetMinMaxPrimaryTermFromMetadataFile() throws IOException {
             RemoteFsTimestampAwareTranslog.getMinMaxPrimaryTermFromMetadataFile(
                 "metadata__9223372036854775800__9223372036854775800__9223370311919910398__31__9223372036854775803__4__1",
                 translogTransferManager,
-                new HashMap<>()
+                new HashMap<>(),
+                logger
             )
         );
         assertEquals(
@@ -1085,7 +1083,8 @@ public void testGetMinMaxPrimaryTermFromMetadataFile() throws IOException {
             RemoteFsTimestampAwareTranslog.getMinMaxPrimaryTermFromMetadataFile(
                 "metadata__9223372036854775797__9223372036854775800__9223370311919910398__31__9223372036854775803__10__1",
                 translogTransferManager,
-                new HashMap<>()
+                new HashMap<>(),
+                logger
             )
         );

@@ -1099,7 +1098,8 @@ public void testGetMinMaxPrimaryTermFromMetadataFile() throws IOException {
             RemoteFsTimestampAwareTranslog.getMinMaxPrimaryTermFromMetadataFile(
                 "metadata__9223372036854775805__9223372036854774799__9223370311919910393__31__1",
                 translogTransferManager,
-                new HashMap<>()
+                new HashMap<>(),
+                logger
             )
         );
         assertEquals(
@@ -1107,7 +1107,8 @@ public void testGetMinMaxPrimaryTermFromMetadataFile() throws IOException {
             RemoteFsTimestampAwareTranslog.getMinMaxPrimaryTermFromMetadataFile(
                 "metadata__9223372036438563903__9223372036854775800__9223370311919910398__31__1",
                 translogTransferManager,
-                Map.of("metadata__9223372036438563903__9223372036854775800__9223370311919910398__31__1", new Tuple<>(4L, 7L))
+                Map.of("metadata__9223372036438563903__9223372036854775800__9223370311919910398__31__1", new Tuple<>(4L, 7L)),
+                logger
             )
         );
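The filenames exercised above encode primary terms with an inverted-long scheme (Long.MAX_VALUE minus the value, presumably so lexicographic listing surfaces the newest entries first); that is how (1L, 1008L) falls out of the first fixture. The decoder below is a sketch under an assumed eight-field layout inferred from the test data only, not the production parser in TranslogTransferMetadata:

    // Assumed layout (inferred from the fixtures above, not authoritative):
    // metadata__<inv(primaryTerm)>__<inv(generation)>__<inv(createdAt)>__<nodeId>__<inv(minTranslogGen)>__<minPrimaryTerm>__<version>
    // where inv(x) = Long.MAX_VALUE - x.
    public final class MetadataFilenameSketch {
        static long[] minMaxPrimaryTerm(String metadataFile) {
            String[] parts = metadataFile.split("__");
            if (parts.length < 8) {
                return null; // older format: primary terms are not encoded in the name
            }
            long maxPrimaryTerm = Long.MAX_VALUE - Long.parseLong(parts[1]);
            long minPrimaryTerm = Long.parseLong(parts[6]);
            return new long[] { minPrimaryTerm, maxPrimaryTerm };
        }

        public static void main(String[] args) {
            long[] terms = minMaxPrimaryTerm(
                "metadata__9223372036854774799__9223372036854774799__9223370311919910393__31__9223372036854775106__1__1"
            );
            System.out.println(terms[0] + ".." + terms[1]); // prints 1..1008
        }
    }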
@@ -1115,6 +1116,36 @@ public void testGetMinMaxPrimaryTermFromMetadataFile() throws IOException {
         verify(translogTransferManager, times(0)).readMetadata(
             "metadata__9223372036438563903__9223372036854775800__9223370311919910398__31__1"
         );
+
+        // Older md files with empty GenerationToPrimaryTermMap
+        md1 = mock(TranslogTransferMetadata.class);
+        when(md1.getGenerationToPrimaryTermMapper()).thenReturn(Map.of());
+        when(translogTransferManager.readMetadata("metadata__9223372036854775805__9223372036854774799__9223370311919910393__31__1"))
+            .thenReturn(md1);
+        assertEquals(
+            new Tuple<>(-1L, 2L),
+            RemoteFsTimestampAwareTranslog.getMinMaxPrimaryTermFromMetadataFile(
+                "metadata__9223372036854775805__9223372036854774799__9223370311919910393__31__1",
+                translogTransferManager,
+                new HashMap<>(),
+                logger
+            )
+        );
+
+        // Older md files with null GenerationToPrimaryTermMap
+        md1 = mock(TranslogTransferMetadata.class);
+        when(md1.getGenerationToPrimaryTermMapper()).thenReturn(null);
+        when(translogTransferManager.readMetadata("metadata__9223372036854775805__9223372036854774799__9223370311919910393__31__1"))
+            .thenReturn(md1);
+        assertEquals(
+            new Tuple<>(-1L, 2L),
+            RemoteFsTimestampAwareTranslog.getMinMaxPrimaryTermFromMetadataFile(
+                "metadata__9223372036854775805__9223372036854774799__9223370311919910393__31__1",
+                translogTransferManager,
+                new HashMap<>(),
+                logger
+            )
+        );
     }

     public void testDeleteStaleRemotePrimaryTerms() throws IOException {
@@ -1332,4 +1363,164 @@ public void testGetMinPrimaryTermInRemoteNotFetched() throws IOException {
         );
         verify(translogTransferManager).listPrimaryTermsInRemote();
     }
+
+    public void testTrimUnreferencedReadersStalePinnedTimestamps() throws Exception {
+        ArrayList<Translog.Operation> ops = new ArrayList<>();
+
+        addToTranslogAndListAndUpload(translog, ops, new Translog.Index("0", 0, primaryTerm.get(), new byte[] { 1 }));
+        addToTranslogAndListAndUpload(translog, ops, new Translog.Index("1", 1, primaryTerm.get(), new byte[] { 1 }));
+
+        // First reader is created at the init of translog
+        assertEquals(3, translog.readers.size());
+        assertEquals(2, blobStoreTransferService.listAll(getTranslogDirectory().add(METADATA_DIR)).size());
+        assertBusy(() -> {
+            assertEquals(6, translog.allUploaded().size());
+            assertEquals(
+                6,
+                blobStoreTransferService.listAll(getTranslogDirectory().add(DATA_DIR).add(String.valueOf(primaryTerm.get()))).size()
+            );
+        });
+
+        addToTranslogAndListAndUpload(translog, ops, new Translog.Index("2", 2, primaryTerm.get(), new byte[] { 1 }));
+        addToTranslogAndListAndUpload(translog, ops, new Translog.Index("3", 3, primaryTerm.get(), new byte[] { 1 }));
+
+        assertBusy(() -> {
+            assertEquals(
+                10,
+                blobStoreTransferService.listAll(getTranslogDirectory().add(DATA_DIR).add(String.valueOf(primaryTerm.get()))).size()
+            );
+        });
+
+        assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable()));
+
+        translog.setMinSeqNoToKeep(3);
+        translog.trimUnreferencedReaders();
+
+        addToTranslogAndListAndUpload(translog, ops, new Translog.Index("4", 4, primaryTerm.get(), new byte[] { 1 }));
+        addToTranslogAndListAndUpload(translog, ops, new Translog.Index("5", 5, primaryTerm.get(), new byte[] { 1 }));
+        addToTranslogAndListAndUpload(translog, ops, new Translog.Index("6", 6, primaryTerm.get(), new byte[] { 1 }));
+
+        assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable()));
+        translog.setMinSeqNoToKeep(6);
+        translog.trimUnreferencedReaders();
+        assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable()));
+
+        assertEquals(1, translog.readers.size());
+        assertBusy(() -> {
+            assertEquals(2, translog.allUploaded().size());
+            assertEquals(7, blobStoreTransferService.listAll(getTranslogDirectory().add(METADATA_DIR)).size());
+            assertEquals(
+                16,
+                blobStoreTransferService.listAll(getTranslogDirectory().add(DATA_DIR).add(String.valueOf(primaryTerm.get()))).size()
+            );
+        }, 30, TimeUnit.SECONDS);
+
+        addToTranslogAndListAndUpload(translog, ops, new Translog.Index("7", 7, primaryTerm.get(), new byte[] { 1 }));
+        addToTranslogAndListAndUpload(translog, ops, new Translog.Index("8", 8, primaryTerm.get(), new byte[] { 1 }));
+
+        assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable()));
+        translog.trimUnreferencedReaders();
+        assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable()));
+
+        assertEquals(3, translog.readers.size());
+        assertBusy(() -> {
+            assertEquals(6, translog.allUploaded().size());
+            assertEquals(9, blobStoreTransferService.listAll(getTranslogDirectory().add(METADATA_DIR)).size());
+            assertEquals(
+                20,
+                blobStoreTransferService.listAll(getTranslogDirectory().add(DATA_DIR).add(String.valueOf(primaryTerm.get()))).size()
+            );
+        }, 30, TimeUnit.SECONDS);
+    }
+
+    public void testTrimUnreferencedReadersNoPermits() throws Exception {
+        // Acquire the permits so that remote translog deletion will not happen
+        translog.remoteGenerationDeletionPermits.acquire(REMOTE_DELETION_PERMITS);
+
+        ArrayList<Translog.Operation> ops = new ArrayList<>();
+
+        addToTranslogAndListAndUpload(translog, ops, new Translog.Index("0", 0, primaryTerm.get(), new byte[] { 1 }));
+        addToTranslogAndListAndUpload(translog, ops, new Translog.Index("1", 1, primaryTerm.get(), new byte[] { 1 }));
+
+        // First reader is created at the init of translog
+        assertEquals(3, translog.readers.size());
+        assertEquals(2, blobStoreTransferService.listAll(getTranslogDirectory().add(METADATA_DIR)).size());
+        assertBusy(() -> {
+            assertEquals(6, translog.allUploaded().size());
+            assertEquals(
+                6,
+                blobStoreTransferService.listAll(getTranslogDirectory().add(DATA_DIR).add(String.valueOf(primaryTerm.get()))).size()
+            );
+        });
+
+        addToTranslogAndListAndUpload(translog, ops, new Translog.Index("2", 2, primaryTerm.get(), new byte[] { 1 }));
+        addToTranslogAndListAndUpload(translog, ops, new Translog.Index("3", 3, primaryTerm.get(), new byte[] { 1 }));
+
+        assertBusy(() -> {
+            assertEquals(
+                10,
+                blobStoreTransferService.listAll(getTranslogDirectory().add(DATA_DIR).add(String.valueOf(primaryTerm.get()))).size()
+            );
+        });
+
+        RemoteStoreSettings.setPinnedTimestampsLookbackInterval(TimeValue.ZERO);
+        // Fetch pinned timestamps so that they won't be stale
+        updatePinnedTimstampTask.run();
+        translog.setMinSeqNoToKeep(3);
+        translog.trimUnreferencedReaders();
+
+        addToTranslogAndListAndUpload(translog, ops, new Translog.Index("4", 4, primaryTerm.get(), new byte[] { 1 }));
+        addToTranslogAndListAndUpload(translog, ops, new Translog.Index("5", 5, primaryTerm.get(), new byte[] { 1 }));
+        addToTranslogAndListAndUpload(translog, ops, new Translog.Index("6", 6, primaryTerm.get(), new byte[] { 1 }));
+
+        // Fetch pinned timestamps so that they won't be stale
+        updatePinnedTimstampTask.run();
+        translog.setMinSeqNoToKeep(6);
+        translog.trimUnreferencedReaders();
+
+        assertEquals(1, translog.readers.size());
+        assertBusy(() -> {
+            assertEquals(2, translog.allUploaded().size());
+            assertEquals(7, blobStoreTransferService.listAll(getTranslogDirectory().add(METADATA_DIR)).size());
+            assertEquals(
+                16,
+                blobStoreTransferService.listAll(getTranslogDirectory().add(DATA_DIR).add(String.valueOf(primaryTerm.get()))).size()
+            );
+        }, 30, TimeUnit.SECONDS);
+
+        addToTranslogAndListAndUpload(translog, ops, new Translog.Index("7", 7, primaryTerm.get(), new byte[] { 1 }));
+        addToTranslogAndListAndUpload(translog, ops, new Translog.Index("8", 8, primaryTerm.get(), new byte[] { 1 }));
+
+        // Fetch pinned timestamps so that they won't be stale
+        updatePinnedTimstampTask.run();
+        translog.trimUnreferencedReaders();
+
+        assertEquals(3, translog.readers.size());
+        assertBusy(() -> {
+            assertEquals(6, translog.allUploaded().size());
+            assertEquals(9, blobStoreTransferService.listAll(getTranslogDirectory().add(METADATA_DIR)).size());
+            assertEquals(
+                20,
+                blobStoreTransferService.listAll(getTranslogDirectory().add(DATA_DIR).add(String.valueOf(primaryTerm.get()))).size()
+            );
+        }, 30, TimeUnit.SECONDS);
+    }
+
+    public void testTrimUnreferencedReadersFailAlwaysRepo() throws Exception {
+        ArrayList<Translog.Operation> ops = new ArrayList<>();
+
+        addToTranslogAndListAndUpload(translog, ops, new Translog.Index("0", 0, primaryTerm.get(), new byte[] { 1 }));
+        addToTranslogAndListAndUpload(translog, ops, new Translog.Index("1", 1, primaryTerm.get(), new byte[] { 1 }));
+        addToTranslogAndListAndUpload(translog, ops, new Translog.Index("2", 2, primaryTerm.get(), new byte[] { 1 }));
+        addToTranslogAndListAndUpload(translog, ops, new Translog.Index("3", 3, primaryTerm.get(), new byte[] { 1 }));
+
+        translog.setMinSeqNoToKeep(2);
+        RemoteStoreSettings.setPinnedTimestampsLookbackInterval(TimeValue.ZERO);
+        updatePinnedTimstampTask.run();
+
+        fail.failAlways();
+        translog.trimUnreferencedReaders();
+
+        assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable()));
+    }
 }
diff --git a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java
index 03c77a9a83f57..190af714d5764 100644
--- a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java
+++ b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java
@@ -20,6 +20,7 @@
 import org.opensearch.cluster.metadata.RepositoryMetadata;
 import org.opensearch.cluster.service.ClusterService;
 import org.opensearch.common.blobstore.BlobContainer;
+import org.opensearch.common.blobstore.BlobMetadata;
 import org.opensearch.common.blobstore.BlobPath;
 import org.opensearch.common.blobstore.BlobStore;
 import org.opensearch.common.blobstore.fs.FsBlobContainer;
@@ -32,6 +33,7 @@
 import org.opensearch.common.util.concurrent.AbstractRunnable;
 import org.opensearch.common.util.concurrent.ConcurrentCollections;
 import org.opensearch.common.util.io.IOUtils;
+import org.opensearch.core.action.ActionListener;
 import org.opensearch.core.common.bytes.BytesArray;
 import org.opensearch.core.common.unit.ByteSizeUnit;
 import org.opensearch.core.common.unit.ByteSizeValue;
@@ -1965,6 +1967,28 @@ public void writeBlobAtomic(final String blobName, final InputStream inputStream
             }
             super.writeBlobAtomic(blobName, inputStream, blobSize, failIfAlreadyExists);
         }
+
+        @Override
+        public void listBlobsByPrefixInSortedOrder(
+            String blobNamePrefix,
+            int limit,
+            BlobNameSortOrder blobNameSortOrder,
+            ActionListener<List<BlobMetadata>> listener
+        ) {
+            if (fail.fail()) {
+                listener.onFailure(new RuntimeException("blob container throwing error"));
+                return;
+            }
+            if (slowDown.getSleepSeconds() > 0) {
+                try {
+                    Thread.sleep(slowDown.getSleepSeconds() * 1000L);
+                } catch (InterruptedException e) {
+                    listener.onFailure(new RuntimeException(e));
+                    return;
+                }
+            }
+            super.listBlobsByPrefixInSortedOrder(blobNamePrefix, limit, blobNameSortOrder, listener);
+        }
     }

     class TranslogThread extends Thread {
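The listBlobsByPrefixInSortedOrder override above follows the suite's existing fault-injection pattern: a shared switch flips listing calls into failures so that testTrimUnreferencedReadersFailAlwaysRepo can drive the trim flow down its error path and still assert that all deletion permits come back. A minimal standalone version of that pattern (types simplified; FailSwitch and Listener here are illustrative stand-ins, not OpenSearch classes):

    import java.util.List;
    import java.util.concurrent.atomic.AtomicBoolean;

    // Simplified model of a fault-injecting blob listing: a shared flag
    // turns every call into a failure so error paths can be exercised.
    public class FailingListerSketch {
        static class FailSwitch {
            private final AtomicBoolean failAlways = new AtomicBoolean();
            void failAlways() { failAlways.set(true); }
            boolean fail() { return failAlways.get(); }
        }

        interface Listener<T> {
            void onResponse(T result);
            void onFailure(Exception e);
        }

        static void listBlobsByPrefix(FailSwitch fail, String prefix, Listener<List<String>> listener) {
            if (fail.fail()) {
                listener.onFailure(new RuntimeException("blob container throwing error"));
                return;
            }
            listener.onResponse(List.of(prefix + "-1", prefix + "-2"));
        }

        public static void main(String[] args) {
            FailSwitch fail = new FailSwitch();
            fail.failAlways();
            listBlobsByPrefix(fail, "metadata", new Listener<>() {
                @Override
                public void onResponse(List<String> blobs) {
                    throw new AssertionError("listing should have failed");
                }

                @Override
                public void onFailure(Exception e) {
                    System.out.println("failed as injected: " + e.getMessage());
                }
            });
        }
    }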