From 02f9d74ec7746ebe9f6e71fe86b127813a4e4daa Mon Sep 17 00:00:00 2001 From: Ashish Singh Date: Wed, 17 Apr 2024 14:34:22 +0530 Subject: [PATCH 01/37] Add faster scaling composite hash value encoding for remote path (#13251) Signed-off-by: Ashish Singh --- .../remotestore/RemoteRestoreSnapshotIT.java | 16 +- .../common/settings/ClusterSettings.java | 3 +- .../index/remote/RemoteStoreEnums.java | 25 +- .../RemoteStorePathStrategyResolver.java | 14 +- .../index/remote/RemoteStoreUtils.java | 36 ++- .../opensearch/indices/IndicesService.java | 23 +- .../MetadataCreateIndexServiceTests.java | 4 +- .../index/remote/RemoteStoreEnumsTests.java | 244 ++++++++++++++++-- .../RemoteStorePathStrategyResolverTests.java | 103 +++++++- .../index/remote/RemoteStoreUtilsTests.java | 116 ++++++++- ...oteStoreShardShallowCopySnapshotTests.java | 220 +++++++++++++++- .../RemoteSegmentStoreDirectoryTests.java | 2 +- .../test/OpenSearchIntegTestCase.java | 4 +- 13 files changed, 741 insertions(+), 69 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java index d34a5f4edbaec..95b7d4381da18 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java @@ -59,7 +59,7 @@ import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.TRANSLOG; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING; +import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -229,7 +229,7 @@ public void testRemoteStoreCustomDataOnIndexCreationAndRestore() { client(clusterManagerNode).admin() .cluster() .prepareUpdateSettings() - .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), PathType.FIXED)) + .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.FIXED)) .get(); createRepository(snapshotRepoName, "fs", getRepositorySettings(absolutePath1, true)); Client client = client(); @@ -260,7 +260,7 @@ public void testRemoteStoreCustomDataOnIndexCreationAndRestore() { client(clusterManagerNode).admin() .cluster() .prepareUpdateSettings() - .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX)) + .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX)) .get(); restoreSnapshotResponse = client.admin() @@ -272,13 +272,13 @@ public void testRemoteStoreCustomDataOnIndexCreationAndRestore() { .get(); assertEquals(RestStatus.ACCEPTED, restoreSnapshotResponse.status()); ensureGreen(restoredIndexName1version2); - validatePathType(restoredIndexName1version2, PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A); + validatePathType(restoredIndexName1version2, PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A_COMPOSITE_1); - // Create index with cluster setting cluster.remote_store.index.path.prefix.type as hashed_prefix. 
+ // Create index with cluster setting cluster.remote_store.index.path.type as hashed_prefix. indexSettings = getIndexSettings(1, 0).build(); createIndex(indexName2, indexSettings); ensureGreen(indexName2); - validatePathType(indexName2, PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A); + validatePathType(indexName2, PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A_COMPOSITE_1); // Validating that custom data has not changed for indexes which were created before the cluster setting got updated validatePathType(indexName1, PathType.FIXED); @@ -294,7 +294,7 @@ public void testRemoteStoreCustomDataOnIndexCreationAndRestore() { client(clusterManagerNode).admin() .cluster() .prepareUpdateSettings() - .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), PathType.FIXED)) + .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.FIXED)) .get(); // Close index 2 @@ -309,7 +309,7 @@ public void testRemoteStoreCustomDataOnIndexCreationAndRestore() { ensureGreen(indexName2); // Validating that custom data has not changed for testindex2 which was created before the cluster setting got updated - validatePathType(indexName2, PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A); + validatePathType(indexName2, PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A_COMPOSITE_1); } private void validatePathType(String index, PathType pathType) { diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index fd352b33e87fa..2904d49c224d7 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -713,7 +713,8 @@ public void apply(Settings value, Settings current, Settings previous) { RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING, IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING, IndicesService.CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING, - IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING, + IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING, + IndicesService.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING, // Admission Control Settings AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE, diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java b/server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java index b51abf19fc000..c1ac74724e405 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java @@ -9,6 +9,7 @@ package org.opensearch.index.remote; import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.hash.FNV1a; @@ -23,6 +24,8 @@ import static java.util.Collections.unmodifiableMap; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA; +import static org.opensearch.index.remote.RemoteStoreUtils.longToCompositeBase64AndBinaryEncoding; +import static org.opensearch.index.remote.RemoteStoreUtils.longToUrlBase64; /** * This class contains the different enums related to remote store like data categories and types, path types @@ -30,12 
+33,14 @@
 *
 * @opensearch.api
 */
+@ExperimentalApi
 public class RemoteStoreEnums {

 /**
  * Categories of the data in Remote store.
  */
 @PublicApi(since = "2.14.0")
+ @ExperimentalApi
 public enum DataCategory {
 SEGMENTS("segments", Set.of(DataType.values())),
 TRANSLOG("translog", Set.of(DATA, METADATA));
@@ -61,6 +66,7 @@ public String getName() {
 * Types of data in remote store.
 */
 @PublicApi(since = "2.14.0")
+ @ExperimentalApi
 public enum DataType {
 DATA("data"),
 METADATA("metadata"),
@@ -82,6 +88,7 @@ public String getName() {
 * For more information, see Github issue #12567.
 */
 @PublicApi(since = "2.14.0")
+ @ExperimentalApi
 public enum PathType {
 FIXED(0) {
 @Override
@@ -214,15 +221,29 @@ public static PathType parseString(String pathType) {
 * Type of hashes supported for path types that have hashing.
 */
 @PublicApi(since = "2.14.0")
+ @ExperimentalApi
 public enum PathHashAlgorithm {
- FNV_1A(0) {
+ FNV_1A_BASE64(0) {
 @Override
 String hash(PathInput pathInput) {
 String input = pathInput.indexUUID() + pathInput.shardId() + pathInput.dataCategory().getName() + pathInput.dataType()
 .getName();
 long hash = FNV1a.hash64(input);
- return RemoteStoreUtils.longToUrlBase64(hash);
+ return longToUrlBase64(hash);
+ }
+ },
+ /**
+ * This hash algorithm will generate a hash value which will use the first 6 bits to create a base64 character and the next 14
+ * bits to create a binary string.
+ */
+ FNV_1A_COMPOSITE_1(1) {
+ @Override
+ String hash(PathInput pathInput) {
+ String input = pathInput.indexUUID() + pathInput.shardId() + pathInput.dataCategory().getName() + pathInput.dataType()
+ .getName();
+ long hash = FNV1a.hash64(input);
+ return longToCompositeBase64AndBinaryEncoding(hash, 20);
+ }
+ };
diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java
index 5b067115df781..f6925bcbcc92d 100644
--- a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java
+++ b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java
@@ -25,12 +25,16 @@ public class RemoteStorePathStrategyResolver {
 private volatile PathType type;
+ private volatile PathHashAlgorithm hashAlgorithm;
+
 private final Supplier<Version> minNodeVersionSupplier;
 public RemoteStorePathStrategyResolver(ClusterSettings clusterSettings, Supplier<Version> minNodeVersionSupplier) {
 this.minNodeVersionSupplier = minNodeVersionSupplier;
- type = clusterSettings.get(IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING);
- clusterSettings.addSettingsUpdateConsumer(IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING, this::setType);
+ type = clusterSettings.get(IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING);
+ hashAlgorithm = clusterSettings.get(IndicesService.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING);
+ clusterSettings.addSettingsUpdateConsumer(IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING, this::setType);
+ clusterSettings.addSettingsUpdateConsumer(IndicesService.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING, this::setHashAlgorithm);
 }
 public RemoteStorePathStrategy get() {
@@ -39,11 +43,15 @@ public RemoteStorePathStrategy get() {
 // Min node version check ensures that we are enabling the new prefix type only when all the nodes understand it.
 pathType = Version.CURRENT.compareTo(minNodeVersionSupplier.get()) <= 0 ? type : PathType.FIXED;
 // If the path type is fixed, hash algorithm is not applicable.
- pathHashAlgorithm = pathType == PathType.FIXED ? null : PathHashAlgorithm.FNV_1A;
+ pathHashAlgorithm = pathType == PathType.FIXED ? null : hashAlgorithm;
 return new RemoteStorePathStrategy(pathType, pathHashAlgorithm);
 }
 private void setType(PathType type) {
 this.type = type;
 }
+
+ private void setHashAlgorithm(PathHashAlgorithm hashAlgorithm) {
+ this.hashAlgorithm = hashAlgorithm;
+ }
 }
diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java b/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java
index 7d0743e70b6cb..4d1d98334c3c4 100644
--- a/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java
+++ b/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java
@@ -15,6 +15,7 @@
 import java.util.Base64;
 import java.util.HashMap;
 import java.util.List;
+import java.util.Locale;
 import java.util.Map;
 import java.util.function.Function;
@@ -26,10 +27,16 @@ public class RemoteStoreUtils {
 public static final int LONG_MAX_LENGTH = String.valueOf(Long.MAX_VALUE).length();
+ /**
+ * URL safe base 64 character set. This must not be changed as this is used in deriving the base64 equivalent of binary.
+ */
+ static final char[] URL_BASE64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_".toCharArray();
+
 /**
 * This method subtracts given numbers from Long.MAX_VALUE and returns a string representation of the result.
 * The resultant string is guaranteed to be of the same length that of Long.MAX_VALUE. If shorter, we add left padding
 * of 0s to the string.
+ *
 * @param num number to get the inverted long string for
 * @return String value of Long.MAX_VALUE - num
 */
@@ -46,6 +53,7 @@ public static String invertLong(long num) {
 /**
 * This method converts the given string into long and subtracts it from Long.MAX_VALUE
+ *
 * @param str long in string format to be inverted
 * @return long value of the invert result
 */
@@ -59,6 +67,7 @@ public static long invertLong(String str) {
 /**
 * Extracts the segment name from the provided segment file name
+ *
 * @param filename Segment file name to parse
 * @return Name of the segment that the segment file belongs to
 */
@@ -79,10 +88,9 @@ public static String getSegmentName(String filename) {
 }
 /**
- *
 * @param mdFiles List of segment/translog metadata files
- * @param fn Function to extract PrimaryTerm_Generation and Node Id from metadata file name .
- * fn returns null if node id is not part of the file name
+ * @param fn Function to extract PrimaryTerm_Generation and Node Id from metadata file name.
+ * fn returns null if node id is not part of the file name
 */
 public static void verifyNoMultipleWriters(List<String> mdFiles, Function<String, Tuple<String, String>> fn) {
 Map<String, String> nodesByPrimaryTermAndGen = new HashMap<>();
@@ -116,4 +124,26 @@ static String longToUrlBase64(long value) {
 String base64Str = Base64.getUrlEncoder().encodeToString(hashBytes);
 return base64Str.substring(0, base64Str.length() - 1);
 }
+
+ static long urlBase64ToLong(String base64Str) {
+ byte[] hashBytes = Base64.getUrlDecoder().decode(base64Str);
+ return ByteBuffer.wrap(hashBytes).getLong();
+ }
+
+ /**
+ * Converts an input hash which occupies 64 bits of memory into a composite encoded string. The string will have 2 parts -
+ * 1. Base 64 string and 2. Binary String. We will use the first 6 bits for creating the base 64 string.
+ * For the second part, the rest of the bits (of length {@code len}-6) will be used as is in string form.
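+ * For example, with {@code len} = 20, the long -5537941589147079860 is encoded as "s11001001010100": the first
+ * 6 bits of its binary representation (101100, decimal 44) map to the URL-safe base64 character 's', and the
+ * next 14 bits are appended verbatim as the binary part.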
+ */ + static String longToCompositeBase64AndBinaryEncoding(long value, int len) { + if (len < 7 || len > 64) { + throw new IllegalArgumentException("In longToCompositeBase64AndBinaryEncoding, len must be between 7 and 64 (both inclusive)"); + } + String binaryEncoding = String.format(Locale.ROOT, "%64s", Long.toBinaryString(value)).replace(' ', '0'); + String base64Part = binaryEncoding.substring(0, 6); + String binaryPart = binaryEncoding.substring(6, len); + int base64DecimalValue = Integer.valueOf(base64Part, 2); + assert base64DecimalValue >= 0 && base64DecimalValue < 64; + return URL_BASE64_CHARSET[base64DecimalValue] + binaryPart; + } } diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index 7e2ea5a77cbfa..8cb240e8f6557 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -62,6 +62,7 @@ import org.opensearch.common.CheckedFunction; import org.opensearch.common.CheckedSupplier; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.cache.policy.CachedQueryResult; import org.opensearch.common.cache.service.CacheService; @@ -124,6 +125,7 @@ import org.opensearch.index.query.QueryRewriteContext; import org.opensearch.index.recovery.RecoveryStats; import org.opensearch.index.refresh.RefreshStats; +import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; import org.opensearch.index.remote.RemoteStoreEnums.PathType; import org.opensearch.index.remote.RemoteStoreStatsTrackerFactory; import org.opensearch.index.search.stats.SearchStats; @@ -307,17 +309,32 @@ public class IndicesService extends AbstractLifecycleComponent ); /** - * This setting is used to set the remote store blob store path prefix strategy. This setting is effective only for + * This setting is used to set the remote store blob store path type strategy. This setting is effective only for * remote store enabled cluster. */ - public static final Setting CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING = new Setting<>( - "cluster.remote_store.index.path.prefix.type", + @ExperimentalApi + public static final Setting CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING = new Setting<>( + "cluster.remote_store.index.path.type", PathType.FIXED.toString(), PathType::parseString, Property.NodeScope, Property.Dynamic ); + /** + * This setting is used to set the remote store blob store path hash algorithm strategy. This setting is effective only for + * remote store enabled cluster. This setting will come to effect if the {@link #CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING} + * is either {@code HASHED_PREFIX} or {@code HASHED_INFIX}. + */ + @ExperimentalApi + public static final Setting CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING = new Setting<>( + "cluster.remote_store.index.path.hash_algorithm", + PathHashAlgorithm.FNV_1A_COMPOSITE_1.toString(), + PathHashAlgorithm::parseString, + Property.NodeScope, + Property.Dynamic + ); + /** * The node's settings. 
*/ diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java index d3086de6ec89e..1a9321a755fef 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java @@ -1711,7 +1711,7 @@ public void testRemoteCustomData() { validateRemoteCustomData( indexMetadata.getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY), PathHashAlgorithm.NAME, - PathHashAlgorithm.FNV_1A.name() + PathHashAlgorithm.FNV_1A_COMPOSITE_1.name() ); } @@ -1720,7 +1720,7 @@ private IndexMetadata testRemoteCustomData(boolean remoteStoreEnabled, PathType if (remoteStoreEnabled) { settingsBuilder.put(NODE_ATTRIBUTES.getKey() + REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, "test"); } - settingsBuilder.put(IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), pathType.toString()); + settingsBuilder.put(IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), pathType.toString()); Settings settings = settingsBuilder.build(); ClusterService clusterService = mock(ClusterService.class); diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStoreEnumsTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStoreEnumsTests.java index fe5635063f783..575b397382f24 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStoreEnumsTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStoreEnumsTests.java @@ -25,7 +25,8 @@ import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.LOCK_FILES; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA; -import static org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm.FNV_1A; +import static org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm.FNV_1A_BASE64; +import static org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm.FNV_1A_COMPOSITE_1; import static org.opensearch.index.remote.RemoteStoreEnums.PathType.FIXED; import static org.opensearch.index.remote.RemoteStoreEnums.PathType.HASHED_INFIX; import static org.opensearch.index.remote.RemoteStoreEnums.PathType.HASHED_PREFIX; @@ -161,10 +162,10 @@ public void testGeneratePathForHashedPrefixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - BlobPath result = HASHED_PREFIX.path(pathInput, FNV_1A); + BlobPath result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64); assertTrue( result.buildAsString() - .startsWith(String.join(SEPARATOR, FNV_1A.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) + .startsWith(String.join(SEPARATOR, FNV_1A_BASE64.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) ); // assert with exact value for known base path @@ -178,7 +179,7 @@ public void testGeneratePathForHashedPrefixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_PREFIX.path(pathInput, FNV_1A); + result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64); assertEquals("DgSI70IciXs/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/translog/data/", result.buildAsString()); // Translog Metadata @@ -190,10 +191,10 @@ public void testGeneratePathForHashedPrefixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_PREFIX.path(pathInput, FNV_1A); + 
result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64); assertTrue( result.buildAsString() - .startsWith(String.join(SEPARATOR, FNV_1A.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) + .startsWith(String.join(SEPARATOR, FNV_1A_BASE64.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) ); // assert with exact value for known base path @@ -204,7 +205,7 @@ public void testGeneratePathForHashedPrefixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_PREFIX.path(pathInput, FNV_1A); + result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64); assertEquals("oKU5SjILiy4/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/translog/metadata/", result.buildAsString()); // Translog Lock files - This is a negative case where the assertion will trip. @@ -238,10 +239,10 @@ public void testGeneratePathForHashedPrefixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_PREFIX.path(pathInput, FNV_1A); + result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64); assertTrue( result.buildAsString() - .startsWith(String.join(SEPARATOR, FNV_1A.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) + .startsWith(String.join(SEPARATOR, FNV_1A_BASE64.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) ); // assert with exact value for known base path @@ -252,7 +253,7 @@ public void testGeneratePathForHashedPrefixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_PREFIX.path(pathInput, FNV_1A); + result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64); assertEquals("AUBRfCIuWdk/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/segments/data/", result.buildAsString()); // Segment Metadata @@ -264,10 +265,10 @@ public void testGeneratePathForHashedPrefixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_PREFIX.path(pathInput, FNV_1A); + result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64); assertTrue( result.buildAsString() - .startsWith(String.join(SEPARATOR, FNV_1A.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) + .startsWith(String.join(SEPARATOR, FNV_1A_BASE64.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) ); // assert with exact value for known base path @@ -278,7 +279,7 @@ public void testGeneratePathForHashedPrefixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_PREFIX.path(pathInput, FNV_1A); + result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64); assertEquals("erwR-G735Uw/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/segments/metadata/", result.buildAsString()); // Segment Lockfiles @@ -290,10 +291,10 @@ public void testGeneratePathForHashedPrefixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_PREFIX.path(pathInput, FNV_1A); + result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64); assertTrue( result.buildAsString() - .startsWith(String.join(SEPARATOR, FNV_1A.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) + .startsWith(String.join(SEPARATOR, FNV_1A_BASE64.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) ); // assert with exact value for known base path @@ -304,10 +305,197 @@ public void testGeneratePathForHashedPrefixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_PREFIX.path(pathInput, FNV_1A); + result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64); 
assertEquals("KeYDIk0mJXI/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/segments/lock_files/", result.buildAsString()); } + public void testGeneratePathForHashedPrefixTypeAndFNVCompositeHashAlgorithm() { + BlobPath blobPath = new BlobPath(); + List pathList = getPathList(); + for (String path : pathList) { + blobPath = blobPath.add(path); + } + + String indexUUID = randomAlphaOfLength(10); + String shardId = String.valueOf(randomInt(100)); + DataCategory dataCategory = TRANSLOG; + DataType dataType = DATA; + + String basePath = getPath(pathList) + indexUUID + SEPARATOR + shardId; + // Translog Data + PathInput pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + BlobPath result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); + assertTrue( + result.buildAsString() + .startsWith( + String.join(SEPARATOR, FNV_1A_COMPOSITE_1.hash(pathInput), basePath, dataCategory.getName(), dataType.getName()) + ) + ); + + // assert with exact value for known base path + BlobPath fixedBlobPath = BlobPath.cleanPath().add("xjsdhj").add("ddjsha").add("yudy7sd").add("32hdhua7").add("89jdij"); + String fixedIndexUUID = "k2ijhe877d7yuhx7"; + String fixedShardId = "10"; + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); + assertEquals("D10000001001000/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/translog/data/", result.buildAsString()); + + // Translog Metadata + dataType = METADATA; + pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); + assertTrue( + result.buildAsString() + .startsWith( + String.join(SEPARATOR, FNV_1A_COMPOSITE_1.hash(pathInput), basePath, dataCategory.getName(), dataType.getName()) + ) + ); + + // assert with exact value for known base path + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); + assertEquals( + "o00101001010011/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/translog/metadata/", + result.buildAsString() + ); + + // Translog Lock files - This is a negative case where the assertion will trip. 
+ dataType = LOCK_FILES; + PathInput finalPathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + assertThrows(AssertionError.class, () -> HASHED_PREFIX.path(finalPathInput, null)); + + // assert with exact value for known base path + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + assertThrows(AssertionError.class, () -> HASHED_PREFIX.path(finalPathInput, null)); + + // Segment Data + dataCategory = SEGMENTS; + dataType = DATA; + pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); + assertTrue( + result.buildAsString() + .startsWith( + String.join(SEPARATOR, FNV_1A_COMPOSITE_1.hash(pathInput), basePath, dataCategory.getName(), dataType.getName()) + ) + ); + + // assert with exact value for known base path + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); + assertEquals("A01010000000101/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/segments/data/", result.buildAsString()); + + // Segment Metadata + dataType = METADATA; + pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); + assertTrue( + result.buildAsString() + .startsWith( + String.join(SEPARATOR, FNV_1A_COMPOSITE_1.hash(pathInput), basePath, dataCategory.getName(), dataType.getName()) + ) + ); + + // assert with exact value for known base path + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); + assertEquals( + "e10101111000001/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/segments/metadata/", + result.buildAsString() + ); + + // Segment Lockfiles + dataType = LOCK_FILES; + pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); + assertTrue( + result.buildAsString() + .startsWith( + String.join(SEPARATOR, FNV_1A_COMPOSITE_1.hash(pathInput), basePath, dataCategory.getName(), dataType.getName()) + ) + ); + + // assert with exact value for known base path + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); + assertEquals( + "K01111001100000/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/segments/lock_files/", + result.buildAsString() + ); + } + public void testGeneratePathForHashedInfixType() { BlobPath blobPath = new BlobPath(); List pathList = getPathList(); @@ -330,7 +518,7 @@ public void testGeneratePathForHashedInfixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - BlobPath result = 
HASHED_INFIX.path(pathInput, FNV_1A); + BlobPath result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64); String expected = derivePath(basePath, pathInput); String actual = result.buildAsString(); assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); @@ -346,7 +534,7 @@ public void testGeneratePathForHashedInfixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_INFIX.path(pathInput, FNV_1A); + result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64); expected = "xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/DgSI70IciXs/k2ijhe877d7yuhx7/10/translog/data/"; actual = result.buildAsString(); assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); @@ -361,7 +549,7 @@ public void testGeneratePathForHashedInfixType() { .dataType(dataType) .build(); - result = HASHED_INFIX.path(pathInput, FNV_1A); + result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64); expected = derivePath(basePath, pathInput); actual = result.buildAsString(); assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); @@ -374,7 +562,7 @@ public void testGeneratePathForHashedInfixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_INFIX.path(pathInput, FNV_1A); + result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64); expected = "xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/oKU5SjILiy4/k2ijhe877d7yuhx7/10/translog/metadata/"; actual = result.buildAsString(); assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); @@ -410,7 +598,7 @@ public void testGeneratePathForHashedInfixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_INFIX.path(pathInput, FNV_1A); + result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64); expected = derivePath(basePath, pathInput); actual = result.buildAsString(); assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); @@ -423,7 +611,7 @@ public void testGeneratePathForHashedInfixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_INFIX.path(pathInput, FNV_1A); + result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64); expected = "xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/AUBRfCIuWdk/k2ijhe877d7yuhx7/10/segments/data/"; actual = result.buildAsString(); assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); @@ -437,7 +625,7 @@ public void testGeneratePathForHashedInfixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_INFIX.path(pathInput, FNV_1A); + result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64); expected = derivePath(basePath, pathInput); actual = result.buildAsString(); assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); @@ -450,7 +638,7 @@ public void testGeneratePathForHashedInfixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_INFIX.path(pathInput, FNV_1A); + result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64); expected = "xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/erwR-G735Uw/k2ijhe877d7yuhx7/10/segments/metadata/"; actual = result.buildAsString(); assertTrue(new ParameterizedMessage("expected={} 
actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); @@ -464,7 +652,7 @@ public void testGeneratePathForHashedInfixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_INFIX.path(pathInput, FNV_1A); + result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64); expected = derivePath(basePath, pathInput); actual = result.buildAsString(); assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); @@ -477,7 +665,7 @@ public void testGeneratePathForHashedInfixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_INFIX.path(pathInput, FNV_1A); + result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64); expected = "xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/KeYDIk0mJXI/k2ijhe877d7yuhx7/10/segments/lock_files/"; actual = result.buildAsString(); assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); @@ -487,7 +675,7 @@ private String derivePath(String basePath, PathInput pathInput) { return "".equals(basePath) ? String.join( SEPARATOR, - FNV_1A.hash(pathInput), + FNV_1A_BASE64.hash(pathInput), pathInput.indexUUID(), pathInput.shardId(), pathInput.dataCategory().getName(), @@ -496,7 +684,7 @@ private String derivePath(String basePath, PathInput pathInput) { : String.join( SEPARATOR, basePath, - FNV_1A.hash(pathInput), + FNV_1A_BASE64.hash(pathInput), pathInput.indexUUID(), pathInput.shardId(), pathInput.dataCategory().getName(), diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyResolverTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyResolverTests.java index 9d4b41f5c395f..4aa0d11601a05 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyResolverTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyResolverTests.java @@ -11,17 +11,17 @@ import org.opensearch.Version; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; import org.opensearch.index.remote.RemoteStoreEnums.PathType; import org.opensearch.test.OpenSearchTestCase; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING; +import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING; +import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING; public class RemoteStorePathStrategyResolverTests extends OpenSearchTestCase { public void testGetMinVersionOlder() { - Settings settings = Settings.builder() - .put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), randomFrom(PathType.values())) - .build(); + Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), randomFrom(PathType.values())).build(); ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.V_2_13_0); assertEquals(PathType.FIXED, resolver.get().getType()); @@ -30,7 +30,7 @@ public void testGetMinVersionOlder() { public void testGetMinVersionNewer() { PathType pathType = randomFrom(PathType.values()); - Settings settings = 
Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), pathType).build(); + Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), pathType).build(); ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT); assertEquals(pathType, resolver.get().getType()); @@ -39,7 +39,100 @@ public void testGetMinVersionNewer() { } else { assertNull(resolver.get().getHashAlgorithm()); } + } + + public void testGetStrategy() { + // FIXED type + Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.FIXED).build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT); + assertEquals(PathType.FIXED, resolver.get().getType()); + + // FIXED type with hash algorithm + settings = Settings.builder() + .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.FIXED) + .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), randomFrom(PathHashAlgorithm.values())) + .build(); + clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT); + assertEquals(PathType.FIXED, resolver.get().getType()); + + // HASHED_PREFIX type with FNV_1A_COMPOSITE + settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX).build(); + clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT); + assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_COMPOSITE_1, resolver.get().getHashAlgorithm()); + + // HASHED_PREFIX type with FNV_1A_COMPOSITE + settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX).build(); + clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT); + assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_COMPOSITE_1, resolver.get().getHashAlgorithm()); + + // HASHED_PREFIX type with FNV_1A_BASE64 + settings = Settings.builder() + .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX) + .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), PathHashAlgorithm.FNV_1A_BASE64) + .build(); + clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT); + assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_BASE64, resolver.get().getHashAlgorithm()); + // HASHED_PREFIX type with FNV_1A_BASE64 + settings = Settings.builder() + .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX) + .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), PathHashAlgorithm.FNV_1A_BASE64) + .build(); + clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + 
resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT); + assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_BASE64, resolver.get().getHashAlgorithm()); } + public void testGetStrategyWithDynamicUpdate() { + + // Default value + Settings settings = Settings.builder().build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT); + assertEquals(PathType.FIXED, resolver.get().getType()); + assertNull(resolver.get().getHashAlgorithm()); + + // Set HASHED_PREFIX with default hash algorithm + clusterSettings.applySettings( + Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX).build() + ); + assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_COMPOSITE_1, resolver.get().getHashAlgorithm()); + + // Set HASHED_PREFIX with FNV_1A_BASE64 hash algorithm + clusterSettings.applySettings( + Settings.builder() + .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX) + .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), PathHashAlgorithm.FNV_1A_BASE64) + .build() + ); + assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_BASE64, resolver.get().getHashAlgorithm()); + + // Set HASHED_INFIX with default hash algorithm + clusterSettings.applySettings( + Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_INFIX).build() + ); + assertEquals(PathType.HASHED_INFIX, resolver.get().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_COMPOSITE_1, resolver.get().getHashAlgorithm()); + + // Set HASHED_INFIX with FNV_1A_BASE64 hash algorithm + clusterSettings.applySettings( + Settings.builder() + .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_INFIX) + .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), PathHashAlgorithm.FNV_1A_BASE64) + .build() + ); + assertEquals(PathType.HASHED_INFIX, resolver.get().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_BASE64, resolver.get().getHashAlgorithm()); + } } diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java index 34074861f2764..4d3e633848975 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java @@ -14,13 +14,19 @@ import org.opensearch.index.translog.transfer.TranslogTransferMetadata; import org.opensearch.test.OpenSearchTestCase; +import java.math.BigInteger; import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.stream.Collectors; +import static org.opensearch.index.remote.RemoteStoreUtils.URL_BASE64_CHARSET; +import static org.opensearch.index.remote.RemoteStoreUtils.longToCompositeBase64AndBinaryEncoding; import static org.opensearch.index.remote.RemoteStoreUtils.longToUrlBase64; +import static org.opensearch.index.remote.RemoteStoreUtils.urlBase64ToLong; import static org.opensearch.index.remote.RemoteStoreUtils.verifyNoMultipleWriters; import static 
org.opensearch.index.store.RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX; import static org.opensearch.index.store.RemoteSegmentStoreDirectory.MetadataFilenameUtils.SEPARATOR; @@ -28,6 +34,16 @@ public class RemoteStoreUtilsTests extends OpenSearchTestCase { + private static Map BASE64_CHARSET_IDX_MAP; + + static { + Map charToIndexMap = new HashMap<>(); + for (int i = 0; i < URL_BASE64_CHARSET.length; i++) { + charToIndexMap.put(URL_BASE64_CHARSET[i], i); + } + BASE64_CHARSET_IDX_MAP = Collections.unmodifiableMap(charToIndexMap); + } + private final String metadataFilename = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( 12, 23, @@ -205,8 +221,106 @@ public void testLongToBase64() { "6kv3yZNv9kY" ); for (Map.Entry entry : longToExpectedBase64String.entrySet()) { - assertEquals(entry.getValue(), longToUrlBase64(entry.getKey())); + String base64Str = longToUrlBase64(entry.getKey()); + assertEquals(entry.getValue(), base64Str); assertEquals(11, entry.getValue().length()); + assertEquals((long) entry.getKey(), urlBase64ToLong(base64Str)); + } + + int iters = randomInt(100); + for (int i = 0; i < iters; i++) { + long value = randomLong(); + String base64Str = longToUrlBase64(value); + assertEquals(value, urlBase64ToLong(base64Str)); } } + + public void testLongToCompositeUrlBase64AndBinaryEncodingUsing20Bits() { + Map longToExpectedBase64String = Map.of( + -5537941589147079860L, + "s11001001010100", + -5878421770170594047L, + "r10011010111010", + -5147010836697060622L, + "u00100100100010", + 937096430362711837L, + "D01000000010011", + 8422273604115462710L, + "d00111000011110", + -2528761975013221124L, + "300111010000000", + -5512387536280560513L, + "s11100000000001", + -5749656451579835857L, + "s00001101010001", + 5569654857969679538L, + "T01010010110110", + -1563884000447039930L, + "610010010111111" + ); + for (Map.Entry entry : longToExpectedBase64String.entrySet()) { + String base64Str = RemoteStoreUtils.longToCompositeBase64AndBinaryEncoding(entry.getKey(), 20); + assertEquals(entry.getValue(), base64Str); + assertEquals(15, entry.getValue().length()); + assertEquals(longToUrlBase64(entry.getKey()).charAt(0), base64Str.charAt(0)); + } + + int iters = randomInt(1000); + for (int i = 0; i < iters; i++) { + long value = randomLong(); + assertEquals(RemoteStoreUtils.longToCompositeBase64AndBinaryEncoding(value, 20).charAt(0), longToUrlBase64(value).charAt(0)); + } + } + + public void testLongToCompositeUrlBase64AndBinaryEncoding() { + Map longToExpectedBase64String = Map.of( + -5537941589147079860L, + "s1100100101010001110111011101001000000001101010101101001100", + -5878421770170594047L, + "r1001101011101001101000101110010101000011110000110100000001", + -5147010836697060622L, + "u0010010010001001001110100111111111100101011110101011110010", + 937096430362711837L, + "D0100000001001111000011110100001100000011100101011100011101", + 8422273604115462710L, + "d0011100001111011010011100001000110011100110111101000110110", + -2528761975013221124L, + "30011101000000010000110000110110101110100100101110011111100", + -5512387536280560513L, + "s1110000000000100001011110111011011101101001101110001111111", + -5749656451579835857L, + "s0000110101000111011110101110010111000011010000101000101111", + 5569654857969679538L, + "T0101001011011000111001010110000010110011111011110010110010", + -1563884000447039930L, + "61001001011111101111100100110010011011011111111011001000110" + ); + for (Map.Entry entry : longToExpectedBase64String.entrySet()) { + Long hashValue = 
entry.getKey(); + String expectedCompositeEncoding = entry.getValue(); + String actualCompositeEncoding = longToCompositeBase64AndBinaryEncoding(hashValue, 64); + assertEquals(expectedCompositeEncoding, actualCompositeEncoding); + assertEquals(59, expectedCompositeEncoding.length()); + assertEquals(longToUrlBase64(entry.getKey()).charAt(0), actualCompositeEncoding.charAt(0)); + assertEquals(RemoteStoreUtils.longToCompositeBase64AndBinaryEncoding(hashValue, 20), actualCompositeEncoding.substring(0, 15)); + + Long computedHashValue = compositeUrlBase64BinaryEncodingToLong(actualCompositeEncoding); + assertEquals(hashValue, computedHashValue); + } + + int iters = randomInt(1000); + for (int i = 0; i < iters; i++) { + long value = randomLong(); + String compositeEncoding = longToCompositeBase64AndBinaryEncoding(value, 64); + assertEquals(value, compositeUrlBase64BinaryEncodingToLong(compositeEncoding)); + } + } + + static long compositeUrlBase64BinaryEncodingToLong(String encodedValue) { + char ch = encodedValue.charAt(0); + int base64BitsIntValue = BASE64_CHARSET_IDX_MAP.get(ch); + String base64PartBinary = Integer.toBinaryString(base64BitsIntValue); + String binaryString = base64PartBinary + encodedValue.substring(1); + return new BigInteger(binaryString, 2).longValue(); + } } diff --git a/server/src/test/java/org/opensearch/index/snapshots/blobstore/RemoteStoreShardShallowCopySnapshotTests.java b/server/src/test/java/org/opensearch/index/snapshots/blobstore/RemoteStoreShardShallowCopySnapshotTests.java index e3259a3097278..e81eef67d6704 100644 --- a/server/src/test/java/org/opensearch/index/snapshots/blobstore/RemoteStoreShardShallowCopySnapshotTests.java +++ b/server/src/test/java/org/opensearch/index/snapshots/blobstore/RemoteStoreShardShallowCopySnapshotTests.java @@ -104,7 +104,7 @@ public void testToXContent() throws IOException { + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"],\"path_type\":0}"; assert Objects.equals(actual, expectedXContent) : "xContent is " + actual; - // Case 3 - with just hashed prefix type and hash algorithm + // Case 3 - with just hashed prefix type and FNV_1A_BASE64 hash algorithm shardShallowCopySnapshot = new RemoteStoreShardShallowCopySnapshot( snapshot, indexVersion, @@ -119,7 +119,7 @@ public void testToXContent() throws IOException { repositoryBasePath, fileNames, PathType.HASHED_PREFIX, - PathHashAlgorithm.FNV_1A + PathHashAlgorithm.FNV_1A_BASE64 ); try (XContentBuilder builder = MediaTypeRegistry.JSON.contentBuilder()) { builder.startObject(); @@ -134,6 +134,99 @@ public void testToXContent() throws IOException { + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"],\"path_type\":1" + ",\"path_hash_algorithm\":0}"; assert Objects.equals(actual, expectedXContent) : "xContent is " + actual; + + // Case 4 - with just hashed prefix type and FNV_1A_COMPOSITE hash algorithm + shardShallowCopySnapshot = new RemoteStoreShardShallowCopySnapshot( + snapshot, + indexVersion, + primaryTerm, + commitGeneration, + startTime, + time, + totalFileCount, + totalSize, + indexUUID, + remoteStoreRepository, + repositoryBasePath, + fileNames, + PathType.HASHED_PREFIX, + PathHashAlgorithm.FNV_1A_COMPOSITE_1 + ); + try (XContentBuilder builder = MediaTypeRegistry.JSON.contentBuilder()) { + builder.startObject(); + shardShallowCopySnapshot.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + actual = builder.toString(); + } + + expectedXContent = 
"{\"version\":\"2\",\"name\":\"test-snapshot\",\"index_version\":1,\"start_time\":123,\"time\":123," + + "\"number_of_files\":5,\"total_size\":5,\"index_uuid\":\"syzhajds-ashdlfj\",\"remote_store_repository\":" + + "\"test-rs-repository\",\"commit_generation\":5,\"primary_term\":3,\"remote_store_repository_base_path\":" + + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"],\"path_type\":1" + + ",\"path_hash_algorithm\":1}"; + assert Objects.equals(actual, expectedXContent) : "xContent is " + actual; + + // Case 5 - with just hashed infix type and FNV_1A_BASE64 hash algorithm + shardShallowCopySnapshot = new RemoteStoreShardShallowCopySnapshot( + snapshot, + indexVersion, + primaryTerm, + commitGeneration, + startTime, + time, + totalFileCount, + totalSize, + indexUUID, + remoteStoreRepository, + repositoryBasePath, + fileNames, + PathType.HASHED_INFIX, + PathHashAlgorithm.FNV_1A_BASE64 + ); + try (XContentBuilder builder = MediaTypeRegistry.JSON.contentBuilder()) { + builder.startObject(); + shardShallowCopySnapshot.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + actual = builder.toString(); + } + + expectedXContent = "{\"version\":\"2\",\"name\":\"test-snapshot\",\"index_version\":1,\"start_time\":123,\"time\":123," + + "\"number_of_files\":5,\"total_size\":5,\"index_uuid\":\"syzhajds-ashdlfj\",\"remote_store_repository\":" + + "\"test-rs-repository\",\"commit_generation\":5,\"primary_term\":3,\"remote_store_repository_base_path\":" + + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"],\"path_type\":2" + + ",\"path_hash_algorithm\":0}"; + assert Objects.equals(actual, expectedXContent) : "xContent is " + actual; + + // Case 6 - with just hashed infix type and FNV_1A_COMPOSITE hash algorithm + shardShallowCopySnapshot = new RemoteStoreShardShallowCopySnapshot( + snapshot, + indexVersion, + primaryTerm, + commitGeneration, + startTime, + time, + totalFileCount, + totalSize, + indexUUID, + remoteStoreRepository, + repositoryBasePath, + fileNames, + PathType.HASHED_INFIX, + PathHashAlgorithm.FNV_1A_COMPOSITE_1 + ); + try (XContentBuilder builder = MediaTypeRegistry.JSON.contentBuilder()) { + builder.startObject(); + shardShallowCopySnapshot.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + actual = builder.toString(); + } + + expectedXContent = "{\"version\":\"2\",\"name\":\"test-snapshot\",\"index_version\":1,\"start_time\":123,\"time\":123," + + "\"number_of_files\":5,\"total_size\":5,\"index_uuid\":\"syzhajds-ashdlfj\",\"remote_store_repository\":" + + "\"test-rs-repository\",\"commit_generation\":5,\"primary_term\":3,\"remote_store_repository_base_path\":" + + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"],\"path_type\":2" + + ",\"path_hash_algorithm\":1}"; + assert Objects.equals(actual, expectedXContent) : "xContent is " + actual; } public void testFromXContent() throws IOException { @@ -223,7 +316,88 @@ public void testFromXContent() throws IOException { repositoryBasePath, fileNames, PathType.HASHED_PREFIX, - PathHashAlgorithm.FNV_1A + PathHashAlgorithm.FNV_1A_BASE64 + ); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, xContent)) { + RemoteStoreShardShallowCopySnapshot actualShardShallowCopySnapshot = RemoteStoreShardShallowCopySnapshot.fromXContent(parser); + assert Objects.equals(expectedShardShallowCopySnapshot, actualShardShallowCopySnapshot); + } + + // with pathType=PathType.HASHED_PREFIX 
and pathHashAlgorithm=PathHashAlgorithm.FNV_1A_COMPOSITE + xContent = "{\"version\":\"2\",\"name\":\"test-snapshot\",\"index_version\":1,\"start_time\":123,\"time\":123," + + "\"number_of_files\":5,\"total_size\":5,\"index_uuid\":\"syzhajds-ashdlfj\",\"remote_store_repository\":" + + "\"test-rs-repository\",\"commit_generation\":5,\"primary_term\":3,\"remote_store_repository_base_path\":" + + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"],\"path_type\":1,\"path_hash_algorithm\":1}"; + expectedShardShallowCopySnapshot = new RemoteStoreShardShallowCopySnapshot( + "2", + snapshot, + indexVersion, + primaryTerm, + commitGeneration, + startTime, + time, + totalFileCount, + totalSize, + indexUUID, + remoteStoreRepository, + repositoryBasePath, + fileNames, + PathType.HASHED_PREFIX, + PathHashAlgorithm.FNV_1A_COMPOSITE_1 + ); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, xContent)) { + RemoteStoreShardShallowCopySnapshot actualShardShallowCopySnapshot = RemoteStoreShardShallowCopySnapshot.fromXContent(parser); + assert Objects.equals(expectedShardShallowCopySnapshot, actualShardShallowCopySnapshot); + } + + // with pathType=PathType.HASHED_INFIX and pathHashAlgorithm=PathHashAlgorithm.FNV_1A + xContent = "{\"version\":\"2\",\"name\":\"test-snapshot\",\"index_version\":1,\"start_time\":123,\"time\":123," + + "\"number_of_files\":5,\"total_size\":5,\"index_uuid\":\"syzhajds-ashdlfj\",\"remote_store_repository\":" + + "\"test-rs-repository\",\"commit_generation\":5,\"primary_term\":3,\"remote_store_repository_base_path\":" + + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"],\"path_type\":2,\"path_hash_algorithm\":0}"; + expectedShardShallowCopySnapshot = new RemoteStoreShardShallowCopySnapshot( + "2", + snapshot, + indexVersion, + primaryTerm, + commitGeneration, + startTime, + time, + totalFileCount, + totalSize, + indexUUID, + remoteStoreRepository, + repositoryBasePath, + fileNames, + PathType.HASHED_INFIX, + PathHashAlgorithm.FNV_1A_BASE64 + ); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, xContent)) { + RemoteStoreShardShallowCopySnapshot actualShardShallowCopySnapshot = RemoteStoreShardShallowCopySnapshot.fromXContent(parser); + assert Objects.equals(expectedShardShallowCopySnapshot, actualShardShallowCopySnapshot); + } + + // with pathType=PathType.HASHED_INFIX and pathHashAlgorithm=PathHashAlgorithm.FNV_1A_COMPOSITE + xContent = "{\"version\":\"2\",\"name\":\"test-snapshot\",\"index_version\":1,\"start_time\":123,\"time\":123," + + "\"number_of_files\":5,\"total_size\":5,\"index_uuid\":\"syzhajds-ashdlfj\",\"remote_store_repository\":" + + "\"test-rs-repository\",\"commit_generation\":5,\"primary_term\":3,\"remote_store_repository_base_path\":" + + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"],\"path_type\":2,\"path_hash_algorithm\":1}"; + expectedShardShallowCopySnapshot = new RemoteStoreShardShallowCopySnapshot( + "2", + snapshot, + indexVersion, + primaryTerm, + commitGeneration, + startTime, + time, + totalFileCount, + totalSize, + indexUUID, + remoteStoreRepository, + repositoryBasePath, + fileNames, + PathType.HASHED_INFIX, + PathHashAlgorithm.FNV_1A_COMPOSITE_1 ); try (XContentParser parser = createParser(JsonXContent.jsonXContent, xContent)) { RemoteStoreShardShallowCopySnapshot actualShardShallowCopySnapshot = RemoteStoreShardShallowCopySnapshot.fromXContent(parser); @@ -232,7 +406,7 @@ public void 
testFromXContent() throws IOException { } public void testFromXContentInvalid() throws IOException { - final int iters = 14; + final int iters = 18; for (int iter = 0; iter < iters; iter++) { String snapshot = "test-snapshot"; long indexVersion = 1; @@ -296,21 +470,47 @@ public void testFromXContentInvalid() throws IOException { break; case 10: version = "1"; - pathHashAlgorithm = PathHashAlgorithm.FNV_1A; - failure = "Invalid combination of pathType=null pathHashAlgorithm=FNV_1A for version=1"; + pathHashAlgorithm = PathHashAlgorithm.FNV_1A_BASE64; + failure = "Invalid combination of pathType=null pathHashAlgorithm=FNV_1A_BASE64 for version=1"; break; case 11: version = "2"; pathType = PathType.FIXED; - pathHashAlgorithm = PathHashAlgorithm.FNV_1A; - failure = "Invalid combination of pathType=FIXED pathHashAlgorithm=FNV_1A for version=2"; + pathHashAlgorithm = PathHashAlgorithm.FNV_1A_BASE64; + failure = "Invalid combination of pathType=FIXED pathHashAlgorithm=FNV_1A_BASE64 for version=2"; break; case 12: + version = "1"; + pathHashAlgorithm = PathHashAlgorithm.FNV_1A_COMPOSITE_1; + failure = "Invalid combination of pathType=null pathHashAlgorithm=FNV_1A_COMPOSITE_1 for version=1"; + break; + case 13: + version = "2"; + pathType = PathType.FIXED; + pathHashAlgorithm = PathHashAlgorithm.FNV_1A_COMPOSITE_1; + failure = "Invalid combination of pathType=FIXED pathHashAlgorithm=FNV_1A_COMPOSITE_1 for version=2"; + break; + case 14: version = "2"; pathType = PathType.HASHED_PREFIX; - pathHashAlgorithm = PathHashAlgorithm.FNV_1A; + pathHashAlgorithm = PathHashAlgorithm.FNV_1A_BASE64; break; - case 13: + case 15: + version = "2"; + pathType = PathType.HASHED_PREFIX; + pathHashAlgorithm = PathHashAlgorithm.FNV_1A_COMPOSITE_1; + break; + case 16: + version = "2"; + pathType = PathType.HASHED_INFIX; + pathHashAlgorithm = PathHashAlgorithm.FNV_1A_BASE64; + break; + case 17: + version = "2"; + pathType = PathType.HASHED_INFIX; + pathHashAlgorithm = PathHashAlgorithm.FNV_1A_COMPOSITE_1; + break; + case 18: break; default: fail("shouldn't be here"); diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java index 44ddd2de9d007..b1e2028d761f0 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java @@ -706,7 +706,7 @@ public void testCleanupAsync() throws Exception { ShardId shardId = new ShardId(Index.UNKNOWN_INDEX_NAME, indexUUID, Integer.parseInt("0")); RemoteStorePathStrategy pathStrategy = randomFrom( new RemoteStorePathStrategy(PathType.FIXED), - new RemoteStorePathStrategy(PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A) + new RemoteStorePathStrategy(randomFrom(PathType.HASHED_INFIX, PathType.HASHED_PREFIX), randomFrom(PathHashAlgorithm.values())) ); RemoteSegmentStoreDirectory.remoteDirectoryCleanup( diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index c26c3f8d21380..c8d44efd8076a 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -211,7 +211,7 @@ import static org.opensearch.index.IndexSettings.INDEX_DOC_ID_FUZZY_SET_FALSE_POSITIVE_PROBABILITY_SETTING; import static 
org.opensearch.index.IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING; +import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; @@ -2619,7 +2619,7 @@ private static Settings buildRemoteStoreNodeAttributes( settings.put(segmentRepoSettingsAttributeKeyPrefix + "compress", randomBoolean()) .put(segmentRepoSettingsAttributeKeyPrefix + "chunk_size", 200, ByteSizeUnit.BYTES); } - settings.put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), randomFrom(PathType.values())); + settings.put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), randomFrom(PathType.values())); return settings.build(); } From 1c208d581aa0597615e98e27894f1f4c2dde8e75 Mon Sep 17 00:00:00 2001 From: Sachin Kale Date: Wed, 17 Apr 2024 14:35:21 +0530 Subject: [PATCH 02/37] [Remote Store] Cleanup local-only translog files if no metadata in remote (#12691) Signed-off-by: Sachin Kale --- .../opensearch/remotestore/RemoteStoreIT.java | 72 +++++++++++++++++- .../index/translog/RemoteFsTranslog.java | 25 +++++- .../opensearch/index/translog/Translog.java | 38 +++++++++- .../index/translog/RemoteFsTranslogTests.java | 76 +++++++++++++++++++ 4 files changed, 205 insertions(+), 6 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java index b767ffff05e3a..78441f74f6b4f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java @@ -37,6 +37,7 @@ import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.plugins.Plugin; +import org.opensearch.remotestore.multipart.mocks.MockFsRepositoryPlugin; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.transport.MockTransportService; @@ -59,6 +60,7 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.SEGMENTS; +import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.TRANSLOG; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA; import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING; @@ -77,7 +79,7 @@ public class RemoteStoreIT extends RemoteStoreBaseIntegTestCase { @Override protected Collection> nodePlugins() { - return Arrays.asList(MockTransportService.TestPlugin.class); + return Arrays.asList(MockTransportService.TestPlugin.class, MockFsRepositoryPlugin.class); } @Override @@ -789,4 +791,72 @@ public void testResumeUploadAfterFailedPrimaryRelocation() throws ExecutionExcep docs + 
moreDocs + uncommittedOps ); } + + // Test local only translog files which are not uploaded to remote store (no metadata present in remote) + // Without the cleanup change in RemoteFsTranslog.createEmptyTranslog, this test fails with NPE. + public void testLocalOnlyTranslogCleanupOnNodeRestart() throws Exception { + clusterSettingsSuppliedByTest = true; + + // Overriding settings to use AsyncMultiStreamBlobContainer + Settings settings = Settings.builder() + .put(super.nodeSettings(1)) + .put( + remoteStoreClusterSettings( + REPOSITORY_NAME, + segmentRepoPath, + MockFsRepositoryPlugin.TYPE, + REPOSITORY_2_NAME, + translogRepoPath, + MockFsRepositoryPlugin.TYPE + ) + ) + .build(); + + internalCluster().startClusterManagerOnlyNode(settings); + String dataNode = internalCluster().startDataOnlyNode(settings); + + // 1. Create index with 0 replica + createIndex(INDEX_NAME, remoteStoreIndexSettings(0, 10000L, -1)); + ensureGreen(INDEX_NAME); + + // 2. Index docs + int searchableDocs = 0; + for (int i = 0; i < randomIntBetween(1, 5); i++) { + indexBulk(INDEX_NAME, 15); + refresh(INDEX_NAME); + searchableDocs += 15; + } + indexBulk(INDEX_NAME, 15); + + assertHitCount(client(dataNode).prepareSearch(INDEX_NAME).setSize(0).get(), searchableDocs); + + // 3. Delete metadata from remote translog + String indexUUID = client().admin() + .indices() + .prepareGetSettings(INDEX_NAME) + .get() + .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID); + + String shardPath = getShardLevelBlobPath(client(), INDEX_NAME, BlobPath.cleanPath(), "0", TRANSLOG, METADATA).buildAsString(); + Path translogMetaDataPath = Path.of(translogRepoPath + "/" + shardPath); + + try (Stream files = Files.list(translogMetaDataPath)) { + files.forEach(p -> { + try { + Files.delete(p); + } catch (IOException e) { + // Ignore + } + }); + } + + internalCluster().restartNode(dataNode); + + ensureGreen(INDEX_NAME); + + assertHitCount(client(dataNode).prepareSearch(INDEX_NAME).setSize(0).get(), searchableDocs); + indexBulk(INDEX_NAME, 15); + refresh(INDEX_NAME); + assertHitCount(client(dataNode).prepareSearch(INDEX_NAME).setSize(0).get(), searchableDocs + 15); + } } diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java index 67799f0465c29..da905b9605dfd 100644 --- a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java +++ b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java @@ -21,6 +21,7 @@ import org.opensearch.core.util.FileSystemUtils; import org.opensearch.index.remote.RemoteStorePathStrategy; import org.opensearch.index.remote.RemoteTranslogTransferTracker; +import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.translog.transfer.BlobStoreTransferService; import org.opensearch.index.translog.transfer.FileTransferTracker; import org.opensearch.index.translog.transfer.TransferSnapshot; @@ -219,7 +220,7 @@ static void download(TranslogTransferManager translogTransferManager, Path locat throw ex; } - static private void downloadOnce(TranslogTransferManager translogTransferManager, Path location, Logger logger) throws IOException { + private static void downloadOnce(TranslogTransferManager translogTransferManager, Path location, Logger logger) throws IOException { logger.debug("Downloading translog files from remote"); RemoteTranslogTransferTracker statsTracker = translogTransferManager.getRemoteTranslogTransferTracker(); long prevDownloadBytesSucceeded = 
statsTracker.getDownloadBytesSucceeded();
@@ -254,10 +255,32 @@ static private void downloadOnce(TranslogTransferManager translogTransferManager
                 location.resolve(Translog.getCommitCheckpointFileName(translogMetadata.getGeneration())),
                 location.resolve(Translog.CHECKPOINT_FILE_NAME)
             );
+        } else {
+            // When code flow reaches this block, it means we don't have any translog files uploaded to remote store.
+            // If the local filesystem contains an empty translog or no translog, we don't do anything.
+            // If the local filesystem contains a non-empty translog, we clean up these files and create an empty translog.
+            logger.debug("No translog files found on remote, checking local filesystem for cleanup");
+            if (FileSystemUtils.exists(location.resolve(CHECKPOINT_FILE_NAME))) {
+                final Checkpoint checkpoint = readCheckpoint(location);
+                if (isEmptyTranslog(checkpoint) == false) {
+                    logger.debug("Translog files exist on local without any metadata in remote, cleaning up these files");
+                    // Creating an empty translog will clean up the older un-referenced translog files, so we don't have to delete them explicitly.
+                    Translog.createEmptyTranslog(location, translogTransferManager.getShardId(), checkpoint);
+                } else {
+                    logger.debug("Empty translog on local, skipping clean-up");
+                }
+            }
         }
         logger.debug("downloadOnce execution completed");
     }

+    private static boolean isEmptyTranslog(Checkpoint checkpoint) {
+        return checkpoint.generation == checkpoint.minTranslogGeneration
+            && checkpoint.minSeqNo == SequenceNumbers.NO_OPS_PERFORMED
+            && checkpoint.maxSeqNo == SequenceNumbers.NO_OPS_PERFORMED
+            && checkpoint.numOps == 0;
+    }
+
     public static TranslogTransferManager buildTranslogTransferManager(
         BlobStoreRepository blobStoreRepository,
         ThreadPool threadPool,
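The cleanup branch in downloadOnce above reduces to a single checkpoint invariant. A minimal illustrative sketch of that invariant, using a hypothetical CheckpointView stand-in rather than the production Checkpoint class, and assuming SequenceNumbers.NO_OPS_PERFORMED is the -1 sentinel:

```java
// Illustrative sketch only: a simplified stand-in for Checkpoint/SequenceNumbers,
// showing what isEmptyTranslog(...) above treats as an "empty" translog.
record CheckpointView(long generation, long minTranslogGeneration, long minSeqNo, long maxSeqNo, int numOps) {

    // Mirrors SequenceNumbers.NO_OPS_PERFORMED: no operation has ever been indexed.
    static final long NO_OPS_PERFORMED = -1L;

    // Empty means: a single generation that has never recorded an operation,
    // so there is nothing on the local filesystem worth preserving.
    boolean isEmpty() {
        return generation() == minTranslogGeneration()
            && minSeqNo() == NO_OPS_PERFORMED
            && maxSeqNo() == NO_OPS_PERFORMED
            && numOps() == 0;
    }
}
```

Only when this invariant does not hold does downloadOnce rewrite the local directory with a fresh empty translog at a higher generation.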
diff --git a/server/src/main/java/org/opensearch/index/translog/Translog.java b/server/src/main/java/org/opensearch/index/translog/Translog.java
index 7c50ed6ecd58f..c653605f8fa10 100644
--- a/server/src/main/java/org/opensearch/index/translog/Translog.java
+++ b/server/src/main/java/org/opensearch/index/translog/Translog.java
@@ -2011,17 +2011,47 @@ public static String createEmptyTranslog(
         final long primaryTerm,
         @Nullable final String translogUUID,
         @Nullable final ChannelFactory factory
+    ) throws IOException {
+        return createEmptyTranslog(location, shardId, initialGlobalCheckpoint, primaryTerm, translogUUID, factory, 1);
+    }
+
+    public static String createEmptyTranslog(final Path location, final ShardId shardId, Checkpoint checkpoint) throws IOException {
+        final Path highestGenTranslogFile = location.resolve(getFilename(checkpoint.generation));
+        final TranslogHeader translogHeader;
+        try (FileChannel channel = FileChannel.open(highestGenTranslogFile, StandardOpenOption.READ)) {
+            translogHeader = TranslogHeader.read(highestGenTranslogFile, channel);
+        }
+        final String translogUUID = translogHeader.getTranslogUUID();
+        final long primaryTerm = translogHeader.getPrimaryTerm();
+        final ChannelFactory channelFactory = FileChannel::open;
+        return Translog.createEmptyTranslog(
+            location,
+            shardId,
+            SequenceNumbers.NO_OPS_PERFORMED,
+            primaryTerm,
+            translogUUID,
+            channelFactory,
+            checkpoint.generation + 1
+        );
+    }
+
+    public static String createEmptyTranslog(
+        final Path location,
+        final ShardId shardId,
+        final long initialGlobalCheckpoint,
+        final long primaryTerm,
+        @Nullable final String translogUUID,
+        @Nullable final ChannelFactory factory,
+        final long generation
     ) throws IOException {
         IOUtils.rm(location);
         Files.createDirectories(location);
-        final long generation = 1L;
-        final long minTranslogGeneration = 1L;
         final ChannelFactory channelFactory = factory != null ? factory : FileChannel::open;
         final String uuid = Strings.hasLength(translogUUID) ? translogUUID : UUIDs.randomBase64UUID();
         final Path checkpointFile = location.resolve(CHECKPOINT_FILE_NAME);
         final Path translogFile = location.resolve(getFilename(generation));
-        final Checkpoint checkpoint = Checkpoint.emptyTranslogCheckpoint(0, generation, initialGlobalCheckpoint, minTranslogGeneration);
+        final Checkpoint checkpoint = Checkpoint.emptyTranslogCheckpoint(0, generation, initialGlobalCheckpoint, generation);
         Checkpoint.write(channelFactory, checkpointFile, checkpoint, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW);
         final TranslogWriter writer = TranslogWriter.create(
@@ -2031,7 +2061,7 @@ public static String createEmptyTranslog(
             translogFile,
             channelFactory,
             EMPTY_TRANSLOG_BUFFER_SIZE,
-            minTranslogGeneration,
+            generation,
             initialGlobalCheckpoint,
             () -> {
                 throw new UnsupportedOperationException();
diff --git a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java
index 0e3854f65135f..28979a3dc4f28 100644
--- a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java
+++ b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java
@@ -1716,6 +1716,82 @@ public void testDownloadWithRetries() throws IOException {
         RemoteFsTranslog.download(mockTransfer, location, logger);
     }

+    // No translog data in either local or remote, so we skip creating an empty translog
+    public void testDownloadWithNoTranslogInLocalAndRemote() throws IOException {
+        Path location = createTempDir();
+
+        TranslogTransferManager mockTransfer = mock(TranslogTransferManager.class);
+        RemoteTranslogTransferTracker remoteTranslogTransferTracker = mock(RemoteTranslogTransferTracker.class);
+        when(mockTransfer.readMetadata()).thenReturn(null);
+        when(mockTransfer.getRemoteTranslogTransferTracker()).thenReturn(remoteTranslogTransferTracker);
+
+        Path[] filesBeforeDownload = FileSystemUtils.files(location);
+        RemoteFsTranslog.download(mockTransfer, location, logger);
+        assertEquals(filesBeforeDownload, FileSystemUtils.files(location));
+    }
+
+    // No translog data in remote but non-empty translog is present in local.
In this case, we delete all the files + // from local file system and create empty translog + public void testDownloadWithTranslogOnlyInLocal() throws IOException { + TranslogTransferManager mockTransfer = mock(TranslogTransferManager.class); + RemoteTranslogTransferTracker remoteTranslogTransferTracker = mock(RemoteTranslogTransferTracker.class); + when(mockTransfer.readMetadata()).thenReturn(null); + when(mockTransfer.getRemoteTranslogTransferTracker()).thenReturn(remoteTranslogTransferTracker); + + Path location = createTempDir(); + for (Path file : FileSystemUtils.files(translogDir)) { + Files.copy(file, location.resolve(file.getFileName())); + } + + Checkpoint existingCheckpoint = Translog.readCheckpoint(location); + + TranslogTransferManager finalMockTransfer = mockTransfer; + RemoteFsTranslog.download(finalMockTransfer, location, logger); + + Path[] filesPostDownload = FileSystemUtils.files(location); + assertEquals(2, filesPostDownload.length); + assertTrue( + filesPostDownload[0].getFileName().toString().contains("translog.ckp") + || filesPostDownload[1].getFileName().toString().contains("translog.ckp") + ); + + Checkpoint newEmptyTranslogCheckpoint = Translog.readCheckpoint(location); + // Verify that the new checkpoint points to empty translog + assertTrue( + newEmptyTranslogCheckpoint.generation == newEmptyTranslogCheckpoint.minTranslogGeneration + && newEmptyTranslogCheckpoint.minSeqNo == SequenceNumbers.NO_OPS_PERFORMED + && newEmptyTranslogCheckpoint.maxSeqNo == SequenceNumbers.NO_OPS_PERFORMED + && newEmptyTranslogCheckpoint.numOps == 0 + ); + assertTrue(newEmptyTranslogCheckpoint.generation > existingCheckpoint.generation); + assertEquals(newEmptyTranslogCheckpoint.globalCheckpoint, existingCheckpoint.globalCheckpoint); + } + + // No translog data in remote and empty translog in local. 
We skip creating another empty translog + public void testDownloadWithEmptyTranslogOnlyInLocal() throws IOException { + TranslogTransferManager mockTransfer = mock(TranslogTransferManager.class); + RemoteTranslogTransferTracker remoteTranslogTransferTracker = mock(RemoteTranslogTransferTracker.class); + when(mockTransfer.readMetadata()).thenReturn(null); + when(mockTransfer.getRemoteTranslogTransferTracker()).thenReturn(remoteTranslogTransferTracker); + + Path location = createTempDir(); + for (Path file : FileSystemUtils.files(translogDir)) { + Files.copy(file, location.resolve(file.getFileName())); + } + + TranslogTransferManager finalMockTransfer = mockTransfer; + + // download first time will ensure creating empty translog + RemoteFsTranslog.download(finalMockTransfer, location, logger); + Path[] filesPostFirstDownload = FileSystemUtils.files(location); + + // download on empty translog should be a no-op + RemoteFsTranslog.download(finalMockTransfer, location, logger); + Path[] filesPostSecondDownload = FileSystemUtils.files(location); + + assertArrayEquals(filesPostFirstDownload, filesPostSecondDownload); + } + public class ThrowingBlobRepository extends FsRepository { private final Environment environment; From 9c35a848a4a7c389b2d2a1fa3116e6041c6c7890 Mon Sep 17 00:00:00 2001 From: Ashish Singh Date: Wed, 17 Apr 2024 16:45:26 +0530 Subject: [PATCH 03/37] Add remote path settings to RemoteStoreSettings (#13225) Signed-off-by: Ashish Singh --- .../remotestore/RemoteRestoreSnapshotIT.java | 2 +- .../metadata/MetadataCreateIndexService.java | 6 +- .../common/settings/ClusterSettings.java | 6 +- .../RemoteStorePathStrategyResolver.java | 27 ++------- .../opensearch/indices/IndicesService.java | 30 ---------- .../indices/RemoteStoreSettings.java | 59 ++++++++++++++++++- .../main/java/org/opensearch/node/Node.java | 3 +- .../MetadataRolloverServiceTests.java | 10 +++- .../MetadataCreateIndexServiceTests.java | 32 ++++++---- .../MetadataIndexTemplateServiceTests.java | 4 +- .../RemoteStorePathStrategyResolverTests.java | 32 ++++++---- .../indices/cluster/ClusterStateChanges.java | 4 +- .../snapshots/SnapshotResiliencyTests.java | 3 +- .../test/OpenSearchIntegTestCase.java | 4 +- 14 files changed, 133 insertions(+), 89 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java index 95b7d4381da18..f8e5079b01a36 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java @@ -59,7 +59,7 @@ import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.TRANSLOG; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING; +import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index d55ec3362b01f..0eba4d241f0fd 100644 --- 
a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -98,6 +98,7 @@ import org.opensearch.indices.IndexCreationException; import org.opensearch.indices.IndicesService; import org.opensearch.indices.InvalidIndexNameException; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.indices.ShardLimitValidator; import org.opensearch.indices.SystemIndices; import org.opensearch.indices.replication.common.ReplicationType; @@ -191,7 +192,8 @@ public MetadataCreateIndexService( final NamedXContentRegistry xContentRegistry, final SystemIndices systemIndices, final boolean forbidPrivateIndexSettings, - final AwarenessReplicaBalance awarenessReplicaBalance + final AwarenessReplicaBalance awarenessReplicaBalance, + final RemoteStoreSettings remoteStoreSettings ) { this.settings = settings; this.clusterService = clusterService; @@ -211,7 +213,7 @@ public MetadataCreateIndexService( createIndexTaskKey = clusterService.registerClusterManagerTask(ClusterManagerTaskKeys.CREATE_INDEX_KEY, true); Supplier minNodeVersionSupplier = () -> clusterService.state().nodes().getMinNodeVersion(); remoteStorePathStrategyResolver = isRemoteDataAttributePresent(settings) - ? new RemoteStorePathStrategyResolver(clusterService.getClusterSettings(), minNodeVersionSupplier) + ? new RemoteStorePathStrategyResolver(remoteStoreSettings, minNodeVersionSupplier) : null; } diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 2904d49c224d7..dab0f6bcf1c85 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -713,8 +713,6 @@ public void apply(Settings value, Settings current, Settings previous) { RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING, IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING, IndicesService.CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING, - IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING, - IndicesService.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING, // Admission Control Settings AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE, @@ -732,7 +730,9 @@ public void apply(Settings value, Settings current, Settings previous) { RemoteStoreSettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING, RemoteStoreSettings.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING, - RemoteStoreSettings.CLUSTER_REMOTE_TRANSLOG_TRANSFER_TIMEOUT_SETTING + RemoteStoreSettings.CLUSTER_REMOTE_TRANSLOG_TRANSFER_TIMEOUT_SETTING, + RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING, + RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING ) ) ); diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java index f6925bcbcc92d..a33f7522daaae 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java @@ -9,10 +9,9 @@ package org.opensearch.index.remote; import org.opensearch.Version; -import org.opensearch.common.settings.ClusterSettings; import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; import 
org.opensearch.index.remote.RemoteStoreEnums.PathType; -import org.opensearch.indices.IndicesService; +import org.opensearch.indices.RemoteStoreSettings; import java.util.function.Supplier; @@ -23,35 +22,21 @@ */ public class RemoteStorePathStrategyResolver { - private volatile PathType type; - - private volatile PathHashAlgorithm hashAlgorithm; - + private final RemoteStoreSettings remoteStoreSettings; private final Supplier minNodeVersionSupplier; - public RemoteStorePathStrategyResolver(ClusterSettings clusterSettings, Supplier minNodeVersionSupplier) { + public RemoteStorePathStrategyResolver(RemoteStoreSettings remoteStoreSettings, Supplier minNodeVersionSupplier) { + this.remoteStoreSettings = remoteStoreSettings; this.minNodeVersionSupplier = minNodeVersionSupplier; - type = clusterSettings.get(IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING); - hashAlgorithm = clusterSettings.get(IndicesService.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING); - clusterSettings.addSettingsUpdateConsumer(IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING, this::setType); - clusterSettings.addSettingsUpdateConsumer(IndicesService.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING, this::setHashAlgorithm); } public RemoteStorePathStrategy get() { PathType pathType; PathHashAlgorithm pathHashAlgorithm; // Min node version check ensures that we are enabling the new prefix type only when all the nodes understand it. - pathType = Version.CURRENT.compareTo(minNodeVersionSupplier.get()) <= 0 ? type : PathType.FIXED; + pathType = Version.CURRENT.compareTo(minNodeVersionSupplier.get()) <= 0 ? remoteStoreSettings.getPathType() : PathType.FIXED; // If the path type is fixed, hash algorithm is not applicable. - pathHashAlgorithm = pathType == PathType.FIXED ? null : hashAlgorithm; + pathHashAlgorithm = pathType == PathType.FIXED ? null : remoteStoreSettings.getPathHashAlgorithm(); return new RemoteStorePathStrategy(pathType, pathHashAlgorithm); } - - private void setType(PathType type) { - this.type = type; - } - - private void setHashAlgorithm(PathHashAlgorithm hashAlgorithm) { - this.hashAlgorithm = hashAlgorithm; - } } diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index 8cb240e8f6557..0187a9fb3b8ba 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -62,7 +62,6 @@ import org.opensearch.common.CheckedFunction; import org.opensearch.common.CheckedSupplier; import org.opensearch.common.Nullable; -import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.cache.policy.CachedQueryResult; import org.opensearch.common.cache.service.CacheService; @@ -125,8 +124,6 @@ import org.opensearch.index.query.QueryRewriteContext; import org.opensearch.index.recovery.RecoveryStats; import org.opensearch.index.refresh.RefreshStats; -import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; -import org.opensearch.index.remote.RemoteStoreEnums.PathType; import org.opensearch.index.remote.RemoteStoreStatsTrackerFactory; import org.opensearch.index.search.stats.SearchStats; import org.opensearch.index.seqno.RetentionLeaseStats; @@ -308,33 +305,6 @@ public class IndicesService extends AbstractLifecycleComponent Property.Final ); - /** - * This setting is used to set the remote store blob store path type strategy. 
This setting is effective only for - * remote store enabled cluster. - */ - @ExperimentalApi - public static final Setting CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING = new Setting<>( - "cluster.remote_store.index.path.type", - PathType.FIXED.toString(), - PathType::parseString, - Property.NodeScope, - Property.Dynamic - ); - - /** - * This setting is used to set the remote store blob store path hash algorithm strategy. This setting is effective only for - * remote store enabled cluster. This setting will come to effect if the {@link #CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING} - * is either {@code HASHED_PREFIX} or {@code HASHED_INFIX}. - */ - @ExperimentalApi - public static final Setting CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING = new Setting<>( - "cluster.remote_store.index.path.hash_algorithm", - PathHashAlgorithm.FNV_1A_COMPOSITE_1.toString(), - PathHashAlgorithm::parseString, - Property.NodeScope, - Property.Dynamic - ); - /** * The node's settings. */ diff --git a/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java b/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java index 7f2121093f8e8..e0a9f7a9e05c1 100644 --- a/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java +++ b/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java @@ -8,6 +8,7 @@ package org.opensearch.indices; +import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; @@ -15,6 +16,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.index.IndexSettings; +import org.opensearch.index.remote.RemoteStoreEnums; /** * Settings for remote store @@ -65,12 +67,41 @@ public class RemoteStoreSettings { Property.Dynamic ); + /** + * This setting is used to set the remote store blob store path type strategy. This setting is effective only for + * remote store enabled cluster. + */ + @ExperimentalApi + public static final Setting CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING = new Setting<>( + "cluster.remote_store.index.path.type", + RemoteStoreEnums.PathType.FIXED.toString(), + RemoteStoreEnums.PathType::parseString, + Property.NodeScope, + Property.Dynamic + ); + + /** + * This setting is used to set the remote store blob store path hash algorithm strategy. This setting is effective only for + * remote store enabled cluster. This setting will come to effect if the {@link #CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING} + * is either {@code HASHED_PREFIX} or {@code HASHED_INFIX}. 
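+     * For example, on a remote-store-enabled cluster both path settings can be updated dynamically; an
+     * illustrative (hypothetical) request:
+     * PUT _cluster/settings
+     * { "transient": { "cluster.remote_store.index.path.type": "HASHED_PREFIX",
+     *   "cluster.remote_store.index.path.hash_algorithm": "FNV_1A_COMPOSITE_1" } }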
+ */ + @ExperimentalApi + public static final Setting CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING = new Setting<>( + "cluster.remote_store.index.path.hash_algorithm", + RemoteStoreEnums.PathHashAlgorithm.FNV_1A_COMPOSITE_1.toString(), + RemoteStoreEnums.PathHashAlgorithm::parseString, + Property.NodeScope, + Property.Dynamic + ); + private volatile TimeValue clusterRemoteTranslogBufferInterval; private volatile int minRemoteSegmentMetadataFiles; private volatile TimeValue clusterRemoteTranslogTransferTimeout; + private volatile RemoteStoreEnums.PathType pathType; + private volatile RemoteStoreEnums.PathHashAlgorithm pathHashAlgorithm; public RemoteStoreSettings(Settings settings, ClusterSettings clusterSettings) { - this.clusterRemoteTranslogBufferInterval = CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.get(settings); + clusterRemoteTranslogBufferInterval = CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.get(settings); clusterSettings.addSettingsUpdateConsumer( CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING, this::setClusterRemoteTranslogBufferInterval @@ -82,11 +113,17 @@ public RemoteStoreSettings(Settings settings, ClusterSettings clusterSettings) { this::setMinRemoteSegmentMetadataFiles ); - this.clusterRemoteTranslogTransferTimeout = CLUSTER_REMOTE_TRANSLOG_TRANSFER_TIMEOUT_SETTING.get(settings); + clusterRemoteTranslogTransferTimeout = CLUSTER_REMOTE_TRANSLOG_TRANSFER_TIMEOUT_SETTING.get(settings); clusterSettings.addSettingsUpdateConsumer( CLUSTER_REMOTE_TRANSLOG_TRANSFER_TIMEOUT_SETTING, this::setClusterRemoteTranslogTransferTimeout ); + + pathType = clusterSettings.get(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING, this::setPathType); + + pathHashAlgorithm = clusterSettings.get(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING, this::setPathHashAlgorithm); } public TimeValue getClusterRemoteTranslogBufferInterval() { @@ -112,4 +149,22 @@ public TimeValue getClusterRemoteTranslogTransferTimeout() { private void setClusterRemoteTranslogTransferTimeout(TimeValue clusterRemoteTranslogTransferTimeout) { this.clusterRemoteTranslogTransferTimeout = clusterRemoteTranslogTransferTimeout; } + + @ExperimentalApi + public RemoteStoreEnums.PathType getPathType() { + return pathType; + } + + @ExperimentalApi + public RemoteStoreEnums.PathHashAlgorithm getPathHashAlgorithm() { + return pathHashAlgorithm; + } + + private void setPathType(RemoteStoreEnums.PathType pathType) { + this.pathType = pathType; + } + + private void setPathHashAlgorithm(RemoteStoreEnums.PathHashAlgorithm pathHashAlgorithm) { + this.pathHashAlgorithm = pathHashAlgorithm; + } } diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 7fa2b6c8ff497..a33fd71e21896 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -863,7 +863,8 @@ protected Node( xContentRegistry, systemIndices, forbidPrivateIndexSettings, - awarenessReplicaBalance + awarenessReplicaBalance, + remoteStoreSettings ); pluginsService.filterPlugins(Plugin.class) .forEach( diff --git a/server/src/test/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java b/server/src/test/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java index da9a8b928a779..50ffd7322544a 100644 --- 
a/server/src/test/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java @@ -81,6 +81,7 @@ import org.opensearch.index.mapper.MetadataFieldMapper; import org.opensearch.index.mapper.RoutingFieldMapper; import org.opensearch.index.shard.IndexEventListener; +import org.opensearch.indices.DefaultRemoteStoreSettings; import org.opensearch.indices.IndicesService; import org.opensearch.indices.InvalidIndexNameException; import org.opensearch.indices.ShardLimitValidator; @@ -738,7 +739,8 @@ public void testRolloverClusterState() throws Exception { null, systemIndices, false, - new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()) + new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), + DefaultRemoteStoreSettings.INSTANCE ); MetadataIndexAliasesService indexAliasesService = new MetadataIndexAliasesService( clusterService, @@ -876,7 +878,8 @@ public void testRolloverClusterStateForDataStream() throws Exception { null, systemIndices, false, - new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()) + new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), + DefaultRemoteStoreSettings.INSTANCE ); MetadataIndexAliasesService indexAliasesService = new MetadataIndexAliasesService( clusterService, @@ -1054,7 +1057,8 @@ public void testRolloverClusterStateForDataStreamNoTemplate() throws Exception { null, new SystemIndices(emptyMap()), false, - new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()) + new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), + DefaultRemoteStoreSettings.INSTANCE ); MetadataIndexAliasesService indexAliasesService = new MetadataIndexAliasesService( clusterService, diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java index 1a9321a755fef..fad98a6609c3b 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java @@ -76,10 +76,12 @@ import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; import org.opensearch.index.remote.RemoteStoreEnums.PathType; import org.opensearch.index.translog.Translog; +import org.opensearch.indices.DefaultRemoteStoreSettings; import org.opensearch.indices.IndexCreationException; import org.opensearch.indices.IndicesService; import org.opensearch.indices.InvalidAliasNameException; import org.opensearch.indices.InvalidIndexNameException; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.indices.ShardLimitValidator; import org.opensearch.indices.SystemIndexDescriptor; import org.opensearch.indices.SystemIndices; @@ -702,7 +704,8 @@ public void testValidateIndexName() throws Exception { null, new SystemIndices(Collections.emptyMap()), false, - new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()) + new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), + DefaultRemoteStoreSettings.INSTANCE ); validateIndexName( checkerService, @@ -788,7 +791,8 @@ public void testValidateDotIndex() { null, new SystemIndices(Collections.singletonMap("foo", systemIndexDescriptors)), false, - new 
AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()) + new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), + DefaultRemoteStoreSettings.INSTANCE ); // Check deprecations assertFalse(checkerService.validateDotIndex(".test2", false)); @@ -1213,7 +1217,8 @@ public void testvalidateIndexSettings() { null, new SystemIndices(Collections.emptyMap()), true, - new AwarenessReplicaBalance(settings, clusterService.getClusterSettings()) + new AwarenessReplicaBalance(settings, clusterService.getClusterSettings()), + DefaultRemoteStoreSettings.INSTANCE ); List validationErrors = checkerService.getIndexSettingsValidationErrors(settings, false, Optional.empty()); @@ -1332,7 +1337,8 @@ public void testClusterForceReplicationTypeInValidateIndexSettings() { null, new SystemIndices(Collections.emptyMap()), true, - new AwarenessReplicaBalance(forceClusterSettingEnabled, clusterService.getClusterSettings()) + new AwarenessReplicaBalance(forceClusterSettingEnabled, clusterService.getClusterSettings()), + DefaultRemoteStoreSettings.INSTANCE ); // Use DOCUMENT replication type setting for index creation final Settings indexSettings = Settings.builder().put(INDEX_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.DOCUMENT).build(); @@ -1457,7 +1463,8 @@ public void testRemoteStoreDisabledByUserIndexSettings() { null, new SystemIndices(Collections.emptyMap()), true, - new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()) + new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), + DefaultRemoteStoreSettings.INSTANCE ); final List validationErrors = checkerService.getIndexSettingsValidationErrors( @@ -1491,7 +1498,8 @@ public void testRemoteStoreOverrideSegmentRepoIndexSettings() { null, new SystemIndices(Collections.emptyMap()), true, - new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()) + new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), + DefaultRemoteStoreSettings.INSTANCE ); final List validationErrors = checkerService.getIndexSettingsValidationErrors( @@ -1530,7 +1538,8 @@ public void testRemoteStoreOverrideTranslogRepoIndexSettings() { null, new SystemIndices(Collections.emptyMap()), true, - new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()) + new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), + DefaultRemoteStoreSettings.INSTANCE ); final List validationErrors = checkerService.getIndexSettingsValidationErrors( @@ -1720,7 +1729,7 @@ private IndexMetadata testRemoteCustomData(boolean remoteStoreEnabled, PathType if (remoteStoreEnabled) { settingsBuilder.put(NODE_ATTRIBUTES.getKey() + REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, "test"); } - settingsBuilder.put(IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), pathType.toString()); + settingsBuilder.put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), pathType.toString()); Settings settings = settingsBuilder.build(); ClusterService clusterService = mock(ClusterService.class); @@ -1734,6 +1743,7 @@ private IndexMetadata testRemoteCustomData(boolean remoteStoreEnabled, PathType when(clusterService.getSettings()).thenReturn(settings); when(clusterService.getClusterSettings()).thenReturn(clusterSettings); when(clusterService.state()).thenReturn(clusterState); + RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); ThreadPool threadPool = new 
TestThreadPool(getTestName()); MetadataCreateIndexService metadataCreateIndexService = new MetadataCreateIndexService( @@ -1749,7 +1759,8 @@ private IndexMetadata testRemoteCustomData(boolean remoteStoreEnabled, PathType null, new SystemIndices(Collections.emptyMap()), true, - new AwarenessReplicaBalance(settings, clusterService.getClusterSettings()) + new AwarenessReplicaBalance(settings, clusterService.getClusterSettings()), + remoteStoreSettings ); CreateIndexClusterStateUpdateRequest request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); Settings indexSettings = Settings.builder() @@ -1872,7 +1883,8 @@ public void testIndexLifecycleNameSetting() { null, new SystemIndices(Collections.emptyMap()), true, - new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()) + new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), + DefaultRemoteStoreSettings.INSTANCE ); final List validationErrors = checkerService.getIndexSettingsValidationErrors(ilnSetting, true, Optional.empty()); diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java index 0b8e64e31a523..0b99ffac67ee8 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java @@ -55,6 +55,7 @@ import org.opensearch.env.Environment; import org.opensearch.index.mapper.MapperParsingException; import org.opensearch.index.mapper.MapperService; +import org.opensearch.indices.DefaultRemoteStoreSettings; import org.opensearch.indices.IndexTemplateMissingException; import org.opensearch.indices.InvalidIndexTemplateException; import org.opensearch.indices.SystemIndices; @@ -2051,7 +2052,8 @@ private static List putTemplate(NamedXContentRegistry xContentRegistr xContentRegistry, new SystemIndices(Collections.emptyMap()), true, - new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()) + new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), + DefaultRemoteStoreSettings.INSTANCE ); MetadataIndexTemplateService service = new MetadataIndexTemplateService( clusterService, diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyResolverTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyResolverTests.java index 4aa0d11601a05..d28ebc8c2e5da 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyResolverTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyResolverTests.java @@ -13,17 +13,19 @@ import org.opensearch.common.settings.Settings; import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; import org.opensearch.index.remote.RemoteStoreEnums.PathType; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.test.OpenSearchTestCase; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING; +import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING; +import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING; public class RemoteStorePathStrategyResolverTests extends 
OpenSearchTestCase { public void testGetMinVersionOlder() { Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), randomFrom(PathType.values())).build(); ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.V_2_13_0); + RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.V_2_13_0); assertEquals(PathType.FIXED, resolver.get().getType()); assertNull(resolver.get().getHashAlgorithm()); } @@ -32,7 +34,8 @@ public void testGetMinVersionNewer() { PathType pathType = randomFrom(PathType.values()); Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), pathType).build(); ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT); + RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.CURRENT); assertEquals(pathType, resolver.get().getType()); if (pathType.requiresHashAlgorithm()) { assertNotNull(resolver.get().getHashAlgorithm()); @@ -45,7 +48,8 @@ public void testGetStrategy() { // FIXED type Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.FIXED).build(); ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT); + RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.CURRENT); assertEquals(PathType.FIXED, resolver.get().getType()); // FIXED type with hash algorithm @@ -54,20 +58,23 @@ public void testGetStrategy() { .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), randomFrom(PathHashAlgorithm.values())) .build(); clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT); + remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + resolver = new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.CURRENT); assertEquals(PathType.FIXED, resolver.get().getType()); // HASHED_PREFIX type with FNV_1A_COMPOSITE settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX).build(); clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT); + remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + resolver = new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.CURRENT); assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); assertEquals(PathHashAlgorithm.FNV_1A_COMPOSITE_1, resolver.get().getHashAlgorithm()); // HASHED_PREFIX type with 
FNV_1A_COMPOSITE settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX).build(); clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT); + remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + resolver = new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.CURRENT); assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); assertEquals(PathHashAlgorithm.FNV_1A_COMPOSITE_1, resolver.get().getHashAlgorithm()); @@ -77,7 +84,8 @@ public void testGetStrategy() { .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), PathHashAlgorithm.FNV_1A_BASE64) .build(); clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT); + remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + resolver = new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.CURRENT); assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); assertEquals(PathHashAlgorithm.FNV_1A_BASE64, resolver.get().getHashAlgorithm()); @@ -87,7 +95,8 @@ public void testGetStrategy() { .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), PathHashAlgorithm.FNV_1A_BASE64) .build(); clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT); + remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + resolver = new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.CURRENT); assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); assertEquals(PathHashAlgorithm.FNV_1A_BASE64, resolver.get().getHashAlgorithm()); } @@ -97,7 +106,8 @@ public void testGetStrategyWithDynamicUpdate() { // Default value Settings settings = Settings.builder().build(); ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT); + RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.CURRENT); assertEquals(PathType.FIXED, resolver.get().getType()); assertNull(resolver.get().getHashAlgorithm()); diff --git a/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java b/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java index dc4dca80ea110..17bd821ed0c8c 100644 --- a/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java +++ b/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java @@ -105,6 +105,7 @@ import org.opensearch.index.IndexService; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.shard.IndexEventListener; +import org.opensearch.indices.DefaultRemoteStoreSettings; import org.opensearch.indices.IndicesService; import org.opensearch.indices.ShardLimitValidator; import org.opensearch.indices.SystemIndices; @@ -312,7 +313,8 @@ public IndexMetadata upgradeIndexMetadata(IndexMetadata indexMetadata, Version m xContentRegistry, systemIndices, true, - awarenessReplicaBalance + 
awarenessReplicaBalance, + DefaultRemoteStoreSettings.INSTANCE ); transportCloseIndexAction = new TransportCloseIndexAction( diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 4326e5fc63961..95a343f3b4025 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -2163,7 +2163,8 @@ public void onFailure(final Exception e) { namedXContentRegistry, systemIndices, false, - new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()) + new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), + DefaultRemoteStoreSettings.INSTANCE ); actions.put( CreateIndexAction.INSTANCE, diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index c8d44efd8076a..41b8c994f4ec4 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -142,6 +142,7 @@ import org.opensearch.indices.IndicesQueryCache; import org.opensearch.indices.IndicesRequestCache; import org.opensearch.indices.IndicesService; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.indices.store.IndicesStore; import org.opensearch.monitor.os.OsInfo; @@ -211,7 +212,6 @@ import static org.opensearch.index.IndexSettings.INDEX_DOC_ID_FUZZY_SET_FALSE_POSITIVE_PROBABILITY_SETTING; import static org.opensearch.index.IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; @@ -2619,7 +2619,7 @@ private static Settings buildRemoteStoreNodeAttributes( settings.put(segmentRepoSettingsAttributeKeyPrefix + "compress", randomBoolean()) .put(segmentRepoSettingsAttributeKeyPrefix + "chunk_size", 200, ByteSizeUnit.BYTES); } - settings.put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), randomFrom(PathType.values())); + settings.put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), randomFrom(PathType.values())); return settings.build(); } From 51009b76d480ac5b03b667b6282a987d0695ca2f Mon Sep 17 00:00:00 2001 From: Atharva Sharma <60044988+atharvasharma61@users.noreply.github.com> Date: Wed, 17 Apr 2024 19:23:05 +0530 Subject: [PATCH 04/37] enabled mockTelemetryPlugin for IT and fixed OOM (#13054) * Disable stackTrace in MockSpanData by default Signed-off-by: Atharva Sharma * enabled MockTelemetryPlugin for ITs Signed-off-by: Atharva Sharma * Added the flag as system property Signed-off-by: Atharva Sharma * Applied java spotless check Signed-off-by: Atharva Sharma * Added details in changelog Signed-off-by: Atharva Sharma * Added details in TESTING.md Signed-off-by: Atharva Sharma * Update TESTING.md Signed-off-by: Atharva Sharma 
<60044988+atharvasharma61@users.noreply.github.com> --------- Signed-off-by: Atharva Sharma Signed-off-by: Atharva Sharma <60044988+atharvasharma61@users.noreply.github.com> --- CHANGELOG.md | 1 + TESTING.md | 7 ++++--- .../org/opensearch/test/OpenSearchIntegTestCase.java | 3 +-- .../telemetry/tracing/StrictCheckSpanProcessor.java | 12 +++++++++++- 4 files changed, 17 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index fe6458937f791..22c46d3b02e9e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -55,6 +55,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix issue with feature flags where default value may not be honored ([#12849](https://github.com/opensearch-project/OpenSearch/pull/12849)) - Fix UOE While building Exists query for nested search_as_you_type field ([#12048](https://github.com/opensearch-project/OpenSearch/pull/12048)) - Client with Java 8 runtime and Apache HttpClient 5 Transport fails with java.lang.NoSuchMethodError: java.nio.ByteBuffer.flip()Ljava/nio/ByteBuffer ([#13100](https://github.com/opensearch-project/opensearch-java/pull/13100)) +- Enabled mockTelemetryPlugin for IT and fixed OOM issues ([#13054](https://github.com/opensearch-project/OpenSearch/pull/13054)) - Fix implement mark() and markSupported() in class FilterStreamInput ([#13098](https://github.com/opensearch-project/OpenSearch/pull/13098)) - Fix snapshot _status API to return correct status for partial snapshots ([#12812](https://github.com/opensearch-project/OpenSearch/pull/12812)) diff --git a/TESTING.md b/TESTING.md index 1c91d60840d61..80fc2412d736b 100644 --- a/TESTING.md +++ b/TESTING.md @@ -84,6 +84,7 @@ This will instruct all JVMs (including any that run cli tools such as creating t - In order to remotely attach a debugger to the process: `--debug-jvm` - In order to set a different keystore password: `--keystore-password yourpassword` - In order to set an OpenSearch setting, provide a setting with the following prefix: `-Dtests.opensearch.` +- In order to enable stack trace of the MockSpanData during testing, add: `-Dtests.telemetry.span.stack_traces=true` (Storing stack traces alongside span data can be useful for comprehensive debugging and performance optimization during testing, as it provides insights into the exact code paths and execution sequences, facilitating efficient issue identification and resolution. Note: Enabling this might lead to OOM issues while running ITs) ## Test case filtering @@ -412,8 +413,8 @@ Say you need to make a change to `main` and have a BWC layer in `5.x`. You will You may want to run BWC tests for a secure OpenSearch cluster. In order to do this, you will need to follow a few additional steps: 1. Clone the OpenSearch Security repository from https://github.com/opensearch-project/security. -2. Get both the old version of the Security plugin (the version you wish to come from) and the new version of the Security plugin (the version you wish to go to). This can be done either by fetching the maven artifact with a command like `wget https://repo1.maven.org/maven2/org/opensearch/plugin/opensearch-security/.0/opensearch-security-.0.zip` or by running `./gradlew assemble` from the base of the Security repository. -3. Move both of the Security artifacts into new directories at the path `/security/bwc-test/src/test/resources/.0`. You should end up with two different directories in `/security/bwc-test/src/test/resources/`, one named the old version and one the new version. +2. 
Get both the old version of the Security plugin (the version you wish to come from) and the new version of the Security plugin (the version you wish to go to). This can be done either by fetching the maven artifact with a command like `wget https://repo1.maven.org/maven2/org/opensearch/plugin/opensearch-security/.0/opensearch-security-.0.zip` or by running `./gradlew assemble` from the base of the Security repository. +3. Move both of the Security artifacts into new directories at the path `/security/bwc-test/src/test/resources/.0`. You should end up with two different directories in `/security/bwc-test/src/test/resources/`, one named the old version and one the new version. 4. Run the following command from the base of the Security repository: ``` @@ -428,7 +429,7 @@ You may want to run BWC tests for a secure OpenSearch cluster. In order to do th `-Dtests.security.manager=false` handles access issues when attempting to read the certificates from the file system. `-Dtests.opensearch.http.protocol=https` tells the wait for cluster startup task to do the right thing. -`-PcustomDistributionUrl=...` uses a custom build of the distribution of OpenSearch. This is unnecessary when running against standard/unmodified OpenSearch core distributions. +`-PcustomDistributionUrl=...` uses a custom build of the distribution of OpenSearch. This is unnecessary when running against standard/unmodified OpenSearch core distributions. ### Skip fetching latest diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index 41b8c994f4ec4..286f0a1d91b4c 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -2096,8 +2096,7 @@ protected boolean addMockGeoShapeFieldMapper() { * @return boolean. */ protected boolean addMockTelemetryPlugin() { - // setting to false until https://github.com/opensearch-project/OpenSearch/issues/12615 is resolved - return false; + return true; } /** diff --git a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/StrictCheckSpanProcessor.java b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/StrictCheckSpanProcessor.java index f7ebb3ee18a9b..4e72caeea584e 100644 --- a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/StrictCheckSpanProcessor.java +++ b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/StrictCheckSpanProcessor.java @@ -8,6 +8,7 @@ package org.opensearch.test.telemetry.tracing; +import org.opensearch.common.Booleans; import org.opensearch.telemetry.tracing.Span; import org.opensearch.test.telemetry.tracing.validators.AllSpansAreEndedProperly; import org.opensearch.test.telemetry.tracing.validators.AllSpansHaveUniqueId; @@ -29,6 +30,14 @@ public StrictCheckSpanProcessor() {} private static Map spanMap = new ConcurrentHashMap<>(); + // If you want to see the stack trace for each spanData, then + // update the flag to true or set the corresponding system property to true + // This is helpful in debugging the tests. Default value is false. + // Note: Enabling this might lead to OOM issues while running ITs. 
+ private static final boolean isStackTraceForSpanEnabled = Booleans.parseBoolean( + System.getProperty("tests.telemetry.span.stack_traces", "false") + ); + @Override public void onStart(Span span) { spanMap.put(span.getSpanId(), toMockSpanData(span)); @@ -53,6 +62,7 @@ public List getFinishedSpanItems() { private MockSpanData toMockSpanData(Span span) { String parentSpanId = (span.getParentSpan() != null) ? span.getParentSpan().getSpanId() : ""; + StackTraceElement[] stackTrace = isStackTraceForSpanEnabled ? Thread.currentThread().getStackTrace() : null; MockSpanData spanData = new MockSpanData( span.getSpanId(), parentSpanId, @@ -60,7 +70,7 @@ private MockSpanData toMockSpanData(Span span) { System.nanoTime(), false, span.getSpanName(), - Thread.currentThread().getStackTrace(), + stackTrace, (span instanceof MockSpan) ? ((MockSpan) span).getAttributes() : Map.of() ); return spanData; From c1d5d76006c64d806a06a3ac4c0dfc962fc13d54 Mon Sep 17 00:00:00 2001 From: Craig Perkins Date: Wed, 17 Apr 2024 10:10:09 -0400 Subject: [PATCH 05/37] Update google dependencies in repository-gcs and discovery-gce (#13213) * Update google dependencies in repository-gcs and discovery-gce Signed-off-by: Craig Perkins * Add to CHANGELOG Signed-off-by: Craig Perkins * Fix test errors and mimic repository-gcs Signed-off-by: Craig Perkins --------- Signed-off-by: Craig Perkins --- CHANGELOG.md | 1 + buildSrc/version.properties | 1 + plugins/discovery-gce/build.gradle | 74 ++++--- .../google-api-client-1.23.0.jar.sha1 | 1 - .../google-api-client-1.35.2.jar.sha1 | 1 + ...services-compute-v1-rev160-1.23.0.jar.sha1 | 1 - ...services-compute-v1-rev235-1.25.0.jar.sha1 | 1 + .../google-http-client-1.23.0.jar.sha1 | 1 - .../google-http-client-1.44.1.jar.sha1 | 1 + .../google-http-client-gson-1.44.1.jar.sha1 | 1 + ...oogle-http-client-jackson2-1.23.0.jar.sha1 | 1 - ...oogle-http-client-jackson2-1.44.1.jar.sha1 | 1 + .../licenses/grpc-api-1.57.2.jar.sha1 | 1 + .../licenses/grpc-api-LICENSE.txt | 202 ++++++++++++++++++ .../licenses/grpc-api-NOTICE.txt | 0 .../licenses/guava-32.1.1-jre.jar.sha1 | 1 + .../discovery-gce/licenses/guava-LICENSE.txt | 202 ++++++++++++++++++ .../discovery-gce/licenses/guava-NOTICE.txt | 0 .../licenses/opencensus-LICENSE.txt | 202 ++++++++++++++++++ .../licenses/opencensus-NOTICE.txt | 0 .../licenses/opencensus-api-0.31.1.jar.sha1 | 1 + ...encensus-contrib-http-util-0.31.1.jar.sha1 | 1 + plugins/repository-gcs/build.gradle | 8 +- .../google-http-client-1.43.3.jar.sha1 | 1 - .../google-http-client-1.44.1.jar.sha1 | 1 + ...ogle-http-client-appengine-1.43.3.jar.sha1 | 1 - ...ogle-http-client-appengine-1.44.1.jar.sha1 | 1 + .../google-http-client-gson-1.43.3.jar.sha1 | 1 - .../google-http-client-gson-1.44.1.jar.sha1 | 1 + 29 files changed, 667 insertions(+), 42 deletions(-) delete mode 100644 plugins/discovery-gce/licenses/google-api-client-1.23.0.jar.sha1 create mode 100644 plugins/discovery-gce/licenses/google-api-client-1.35.2.jar.sha1 delete mode 100644 plugins/discovery-gce/licenses/google-api-services-compute-v1-rev160-1.23.0.jar.sha1 create mode 100644 plugins/discovery-gce/licenses/google-api-services-compute-v1-rev235-1.25.0.jar.sha1 delete mode 100644 plugins/discovery-gce/licenses/google-http-client-1.23.0.jar.sha1 create mode 100644 plugins/discovery-gce/licenses/google-http-client-1.44.1.jar.sha1 create mode 100644 plugins/discovery-gce/licenses/google-http-client-gson-1.44.1.jar.sha1 delete mode 100644 plugins/discovery-gce/licenses/google-http-client-jackson2-1.23.0.jar.sha1 create mode 
100644 plugins/discovery-gce/licenses/google-http-client-jackson2-1.44.1.jar.sha1 create mode 100644 plugins/discovery-gce/licenses/grpc-api-1.57.2.jar.sha1 create mode 100644 plugins/discovery-gce/licenses/grpc-api-LICENSE.txt create mode 100644 plugins/discovery-gce/licenses/grpc-api-NOTICE.txt create mode 100644 plugins/discovery-gce/licenses/guava-32.1.1-jre.jar.sha1 create mode 100644 plugins/discovery-gce/licenses/guava-LICENSE.txt create mode 100644 plugins/discovery-gce/licenses/guava-NOTICE.txt create mode 100644 plugins/discovery-gce/licenses/opencensus-LICENSE.txt create mode 100644 plugins/discovery-gce/licenses/opencensus-NOTICE.txt create mode 100644 plugins/discovery-gce/licenses/opencensus-api-0.31.1.jar.sha1 create mode 100644 plugins/discovery-gce/licenses/opencensus-contrib-http-util-0.31.1.jar.sha1 delete mode 100644 plugins/repository-gcs/licenses/google-http-client-1.43.3.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/google-http-client-1.44.1.jar.sha1 delete mode 100644 plugins/repository-gcs/licenses/google-http-client-appengine-1.43.3.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/google-http-client-appengine-1.44.1.jar.sha1 delete mode 100644 plugins/repository-gcs/licenses/google-http-client-gson-1.43.3.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/google-http-client-gson-1.44.1.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 22c46d3b02e9e..5efcfee3d9d9d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -38,6 +38,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `gradle/wrapper-validation-action` from 2 to 3 ([#13192](https://github.com/opensearch-project/OpenSearch/pull/13192)) - Bump joda from 2.12.2 to 2.12.7 ([#13193](https://github.com/opensearch-project/OpenSearch/pull/13193)) - Bump bouncycastle from 1.77 to 1.78 ([#13243](https://github.com/opensearch-project/OpenSearch/pull/13243)) +- Update google dependencies in repository-gcs and discovery-gce ([#13213](https://github.com/opensearch-project/OpenSearch/pull/13213)) ### Changed - [BWC and API enforcement] Enforcing the presence of API annotations at build time ([#12872](https://github.com/opensearch-project/OpenSearch/pull/12872)) diff --git a/buildSrc/version.properties b/buildSrc/version.properties index ae9abcd58aa3a..6c6138ac9b7f6 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -22,6 +22,7 @@ antlr4 = 4.13.1 guava = 32.1.1-jre protobuf = 3.22.3 jakarta_annotation = 1.3.5 +google_http_client = 1.44.1 # when updating the JNA version, also update the version in buildSrc/build.gradle jna = 5.13.0 diff --git a/plugins/discovery-gce/build.gradle b/plugins/discovery-gce/build.gradle index 85efcc43fd65a..92cdda59d1c99 100644 --- a/plugins/discovery-gce/build.gradle +++ b/plugins/discovery-gce/build.gradle @@ -17,22 +17,23 @@ opensearchplugin { classname 'org.opensearch.plugin.discovery.gce.GceDiscoveryPlugin' } -versions << [ - 'google': '1.23.0' -] - dependencies { - api "com.google.apis:google-api-services-compute:v1-rev160-${versions.google}" - api "com.google.api-client:google-api-client:${versions.google}" + api "com.google.apis:google-api-services-compute:v1-rev235-1.25.0" + api "com.google.api-client:google-api-client:1.35.2" api "com.google.oauth-client:google-oauth-client:1.35.0" - api "com.google.http-client:google-http-client:${versions.google}" - api "com.google.http-client:google-http-client-jackson2:${versions.google}" + api 
"com.google.http-client:google-http-client:${versions.google_http_client}" + api "com.google.http-client:google-http-client-gson:${versions.google_http_client}" + api "com.google.http-client:google-http-client-jackson2:${versions.google_http_client}" api 'com.google.code.findbugs:jsr305:3.0.2' api "org.apache.httpcomponents:httpclient:${versions.httpclient}" api "org.apache.httpcomponents:httpcore:${versions.httpcore}" api "commons-logging:commons-logging:${versions.commonslogging}" api "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}" api "commons-codec:commons-codec:${versions.commonscodec}" + api 'io.grpc:grpc-api:1.57.2' + api 'io.opencensus:opencensus-api:0.31.1' + api 'io.opencensus:opencensus-contrib-http-util:0.31.1' + runtimeOnly "com.google.guava:guava:${versions.guava}" } restResources { @@ -43,6 +44,7 @@ restResources { tasks.named("dependencyLicenses").configure { mapping from: /google-.*/, to: 'google' + mapping from: /opencensus.*/, to: 'opencensus' } check { @@ -55,26 +57,36 @@ test { systemProperty 'tests.artifact', project.name } -thirdPartyAudit.ignoreMissingClasses( - // classes are missing - 'javax.jms.Message', - 'javax.servlet.ServletContextEvent', - 'javax.servlet.ServletContextListener', - 'org.apache.avalon.framework.logger.Logger', - 'org.apache.log.Hierarchy', - 'org.apache.log.Logger', - 'com.google.api.client.json.gson.GsonFactory', - 'com.google.common.base.Preconditions', - 'com.google.common.base.Splitter', - 'com.google.common.cache.CacheBuilder', - 'com.google.common.cache.CacheLoader', - 'com.google.common.cache.LoadingCache', - 'com.google.common.collect.ImmutableMap', - 'com.google.common.collect.ImmutableMap$Builder', - 'com.google.common.collect.ImmutableSet', - 'com.google.common.collect.Lists', - 'com.google.common.collect.Multiset', - 'com.google.common.collect.SortedMultiset', - 'com.google.common.collect.TreeMultiset', - 'com.google.common.io.BaseEncoding', -) +thirdPartyAudit { + ignoreViolations( + // uses internal java api: sun.misc.Unsafe + 'com.google.common.cache.Striped64', + 'com.google.common.cache.Striped64$1', + 'com.google.common.cache.Striped64$Cell', + 'com.google.common.hash.Striped64', + 'com.google.common.hash.Striped64$1', + 'com.google.common.hash.Striped64$Cell', + 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray', + 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$1', + 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$2', + 'com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper', + 'com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper$1', + 'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator', + 'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator$1', + ) + + ignoreMissingClasses( + 'com.google.api.client.http.apache.v2.ApacheHttpTransport', + 'com.google.common.util.concurrent.internal.InternalFutureFailureAccess', + 'com.google.common.util.concurrent.internal.InternalFutures', + 'com.google.gson.stream.JsonReader', + 'com.google.gson.stream.JsonToken', + 'com.google.gson.stream.JsonWriter', + 'javax.jms.Message', + 'javax.servlet.ServletContextEvent', + 'javax.servlet.ServletContextListener', + 'org.apache.avalon.framework.logger.Logger', + 'org.apache.log.Hierarchy', + 'org.apache.log.Logger' + ) +} diff --git a/plugins/discovery-gce/licenses/google-api-client-1.23.0.jar.sha1 b/plugins/discovery-gce/licenses/google-api-client-1.23.0.jar.sha1 deleted file mode 100644 
index 0c35d8e08b91f..0000000000000 --- a/plugins/discovery-gce/licenses/google-api-client-1.23.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -522ea860eb48dee71dfe2c61a1fd09663539f556 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/google-api-client-1.35.2.jar.sha1 b/plugins/discovery-gce/licenses/google-api-client-1.35.2.jar.sha1 new file mode 100644 index 0000000000000..47245f9429e7d --- /dev/null +++ b/plugins/discovery-gce/licenses/google-api-client-1.35.2.jar.sha1 @@ -0,0 +1 @@ +2d737980e34c674da4ff0ae124b80caefdc7198a \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/google-api-services-compute-v1-rev160-1.23.0.jar.sha1 b/plugins/discovery-gce/licenses/google-api-services-compute-v1-rev160-1.23.0.jar.sha1 deleted file mode 100644 index 17219dfe7ecc9..0000000000000 --- a/plugins/discovery-gce/licenses/google-api-services-compute-v1-rev160-1.23.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -004169bfe1cf0e8b2013c9c479e43b731958bc64 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/google-api-services-compute-v1-rev235-1.25.0.jar.sha1 b/plugins/discovery-gce/licenses/google-api-services-compute-v1-rev235-1.25.0.jar.sha1 new file mode 100644 index 0000000000000..f79af846281de --- /dev/null +++ b/plugins/discovery-gce/licenses/google-api-services-compute-v1-rev235-1.25.0.jar.sha1 @@ -0,0 +1 @@ +67bf1ac84286b4f9ea996a90f6e91e36dc648aff \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/google-http-client-1.23.0.jar.sha1 b/plugins/discovery-gce/licenses/google-http-client-1.23.0.jar.sha1 deleted file mode 100644 index 5526275d5a15f..0000000000000 --- a/plugins/discovery-gce/licenses/google-http-client-1.23.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8e86c84ff3c98eca6423e97780325b299133d858 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/google-http-client-1.44.1.jar.sha1 b/plugins/discovery-gce/licenses/google-http-client-1.44.1.jar.sha1 new file mode 100644 index 0000000000000..501f268254fbc --- /dev/null +++ b/plugins/discovery-gce/licenses/google-http-client-1.44.1.jar.sha1 @@ -0,0 +1 @@ +d8956bacb8a4011365fa15a690482c49a70c78c5 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/google-http-client-gson-1.44.1.jar.sha1 b/plugins/discovery-gce/licenses/google-http-client-gson-1.44.1.jar.sha1 new file mode 100644 index 0000000000000..90ddf3ddc5ee6 --- /dev/null +++ b/plugins/discovery-gce/licenses/google-http-client-gson-1.44.1.jar.sha1 @@ -0,0 +1 @@ +f3b8967c6f7078da6380687859d0873105f84d39 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/google-http-client-jackson2-1.23.0.jar.sha1 b/plugins/discovery-gce/licenses/google-http-client-jackson2-1.23.0.jar.sha1 deleted file mode 100644 index 510856a517f04..0000000000000 --- a/plugins/discovery-gce/licenses/google-http-client-jackson2-1.23.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fd6761f4046a8cb0455e6fa5f58e12b061e9826e \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/google-http-client-jackson2-1.44.1.jar.sha1 b/plugins/discovery-gce/licenses/google-http-client-jackson2-1.44.1.jar.sha1 new file mode 100644 index 0000000000000..4472ffbbebe1c --- /dev/null +++ b/plugins/discovery-gce/licenses/google-http-client-jackson2-1.44.1.jar.sha1 @@ -0,0 +1 @@ +3f1947de0fd9eb250af16abe6103c11e68d11635 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/grpc-api-1.57.2.jar.sha1 b/plugins/discovery-gce/licenses/grpc-api-1.57.2.jar.sha1 new file mode 100644 index 
0000000000000..8b320fdd2f9cc --- /dev/null +++ b/plugins/discovery-gce/licenses/grpc-api-1.57.2.jar.sha1 @@ -0,0 +1 @@ +c71a006b81ddae7bc4b7cb1d2da78c1b173761f4 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/grpc-api-LICENSE.txt b/plugins/discovery-gce/licenses/grpc-api-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/discovery-gce/licenses/grpc-api-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/discovery-gce/licenses/grpc-api-NOTICE.txt b/plugins/discovery-gce/licenses/grpc-api-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/discovery-gce/licenses/guava-32.1.1-jre.jar.sha1 b/plugins/discovery-gce/licenses/guava-32.1.1-jre.jar.sha1 new file mode 100644 index 0000000000000..0d791b5d3f55b --- /dev/null +++ b/plugins/discovery-gce/licenses/guava-32.1.1-jre.jar.sha1 @@ -0,0 +1 @@ +ad575652d84153075dd41ec6177ccb15251262b2 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/guava-LICENSE.txt b/plugins/discovery-gce/licenses/guava-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/discovery-gce/licenses/guava-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/discovery-gce/licenses/guava-NOTICE.txt b/plugins/discovery-gce/licenses/guava-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/discovery-gce/licenses/opencensus-LICENSE.txt b/plugins/discovery-gce/licenses/opencensus-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/discovery-gce/licenses/opencensus-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
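The repository-gcs hunks that follow apply the same version-management pattern the discovery-gce changes above introduced: the Google HTTP client version is declared once as `google_http_client = 1.44.1` in `buildSrc/version.properties` and consumed through the build's `versions` map, so the two plugins cannot drift onto different client versions. A minimal sketch of the pattern, assuming the standard OpenSearch build convention (visible elsewhere in this diff, e.g. `${versions.guava}`) that entries in `buildSrc/version.properties` are surfaced to plugin builds as the `versions` property map; the snippet is illustrative and not part of the patch itself:

```
// buildSrc/version.properties exposes its keys through the `versions` map,
// e.g. google_http_client = 1.44.1 (value taken from this patch).

// In each plugin's build.gradle, reference the shared key instead of a
// hard-coded version string; repository-gcs and discovery-gce then
// upgrade together whenever the single property is bumped:
dependencies {
  api "com.google.http-client:google-http-client:${versions.google_http_client}"
  api "com.google.http-client:google-http-client-gson:${versions.google_http_client}"
  api "com.google.http-client:google-http-client-jackson2:${versions.google_http_client}"
}
```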
diff --git a/plugins/discovery-gce/licenses/opencensus-NOTICE.txt b/plugins/discovery-gce/licenses/opencensus-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/discovery-gce/licenses/opencensus-api-0.31.1.jar.sha1 b/plugins/discovery-gce/licenses/opencensus-api-0.31.1.jar.sha1 new file mode 100644 index 0000000000000..03760848f76ef --- /dev/null +++ b/plugins/discovery-gce/licenses/opencensus-api-0.31.1.jar.sha1 @@ -0,0 +1 @@ +66a60c7201c2b8b20ce495f0295b32bb0ccbbc57 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/opencensus-contrib-http-util-0.31.1.jar.sha1 b/plugins/discovery-gce/licenses/opencensus-contrib-http-util-0.31.1.jar.sha1 new file mode 100644 index 0000000000000..4e123da3ab45f --- /dev/null +++ b/plugins/discovery-gce/licenses/opencensus-contrib-http-util-0.31.1.jar.sha1 @@ -0,0 +1 @@ +3c13fc5715231fadb16a9b74a44d9d59c460cfa8 \ No newline at end of file diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index 1dfc64e19601c..c4b1ab8d6875e 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -75,10 +75,10 @@ dependencies { runtimeOnly "com.google.guava:guava:${versions.guava}" api 'com.google.guava:failureaccess:1.0.1' - api 'com.google.http-client:google-http-client:1.43.3' - api 'com.google.http-client:google-http-client-appengine:1.43.3' - api 'com.google.http-client:google-http-client-gson:1.43.3' - api 'com.google.http-client:google-http-client-jackson2:1.44.1' + api "com.google.http-client:google-http-client:${versions.google_http_client}" + api "com.google.http-client:google-http-client-appengine:${versions.google_http_client}" + api "com.google.http-client:google-http-client-gson:${versions.google_http_client}" + api "com.google.http-client:google-http-client-jackson2:${versions.google_http_client}" api 'com.google.oauth-client:google-oauth-client:1.34.1' diff --git a/plugins/repository-gcs/licenses/google-http-client-1.43.3.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-1.43.3.jar.sha1 deleted file mode 100644 index 800467de8bdf3..0000000000000 --- a/plugins/repository-gcs/licenses/google-http-client-1.43.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a758b82e55a2f5f681e289c5ed384d3dbda6f3cd \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-1.44.1.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-1.44.1.jar.sha1 new file mode 100644 index 0000000000000..501f268254fbc --- /dev/null +++ b/plugins/repository-gcs/licenses/google-http-client-1.44.1.jar.sha1 @@ -0,0 +1 @@ +d8956bacb8a4011365fa15a690482c49a70c78c5 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-appengine-1.43.3.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-appengine-1.43.3.jar.sha1 deleted file mode 100644 index 4adcca6a55902..0000000000000 --- a/plugins/repository-gcs/licenses/google-http-client-appengine-1.43.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -09d6cbdde6ea3469a67601a811b4e83de3e68a79 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-appengine-1.44.1.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-appengine-1.44.1.jar.sha1 new file mode 100644 index 0000000000000..7b27b165453cd --- /dev/null +++ b/plugins/repository-gcs/licenses/google-http-client-appengine-1.44.1.jar.sha1 @@ -0,0 +1 @@ +da4f9f691edb7a9f00cd806157a4990cb7e07711 \ No newline at end of file diff --git 
a/plugins/repository-gcs/licenses/google-http-client-gson-1.43.3.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-gson-1.43.3.jar.sha1 deleted file mode 100644 index 43f4fe4a127e1..0000000000000 --- a/plugins/repository-gcs/licenses/google-http-client-gson-1.43.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -252e267acf720ef6333488740a696a1d5e204639 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-gson-1.44.1.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-gson-1.44.1.jar.sha1 new file mode 100644 index 0000000000000..90ddf3ddc5ee6 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-http-client-gson-1.44.1.jar.sha1 @@ -0,0 +1 @@ +f3b8967c6f7078da6380687859d0873105f84d39 \ No newline at end of file From 84679dea01cdae9d50bd9cd6b8c39062df958d40 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Wed, 17 Apr 2024 14:12:14 -0400 Subject: [PATCH 06/37] Snapshot _status API to return correct status for partial snapshots (update version) (#13262) Signed-off-by: Andriy Redko --- .../main/java/org/opensearch/cluster/SnapshotsInProgress.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java b/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java index 8dbdcaa541734..d658f38430dd9 100644 --- a/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java +++ b/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java @@ -747,7 +747,7 @@ public void writeTo(StreamOutput out) throws IOException { snapshot.writeTo(out); out.writeBoolean(includeGlobalState); out.writeBoolean(partial); - if ((out.getVersion().before(Version.V_3_0_0)) && state == State.PARTIAL) { + if ((out.getVersion().before(Version.V_2_14_0)) && state == State.PARTIAL) { // Setting to SUCCESS for partial snapshots in older versions to maintain backward compatibility out.writeByte(State.SUCCESS.value()); } else { From 61ff5f8d53bbb5790466544125553bff9f0c8ccc Mon Sep 17 00:00:00 2001 From: Lakshya Taragi <157457166+ltaragi@users.noreply.github.com> Date: Thu, 18 Apr 2024 17:03:35 +0530 Subject: [PATCH 07/37] Fix flakiness in testDontAllowSwitchingCompatibilityModeForClusterWithMultipleVersions (#13281) Signed-off-by: Lakshya Taragi --- .../TransportClusterManagerNodeActionTests.java | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java b/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java index b3c58164fccbb..b3eb2443fa940 100644 --- a/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java +++ b/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java @@ -91,8 +91,6 @@ import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; import static org.opensearch.test.ClusterServiceUtils.createClusterService; import static org.opensearch.test.ClusterServiceUtils.setState; -import static org.opensearch.test.VersionUtils.randomCompatibleVersion; -import static org.opensearch.test.VersionUtils.randomOpenSearchVersion; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -826,10 +824,9 @@ public void 
testDontAllowSwitchingCompatibilityModeForClusterWithMultipleVersion request.persistentSettings(intendedCompatibilityModeSettings); // two different but compatible open search versions for the discovery nodes - final Version version1 = randomOpenSearchVersion(random()); - final Version version2 = randomCompatibleVersion(random(), version1); + final Version version1 = Version.V_2_13_0; + final Version version2 = Version.V_2_13_1; - assert version1.equals(version2) == false : "current nodes in the cluster must be of different versions"; DiscoveryNode discoveryNode1 = new DiscoveryNode( UUIDs.base64UUID(), buildNewFakeTransportAddress(), From cdb57fa475cc622c7255c045cd26725bd69960a0 Mon Sep 17 00:00:00 2001 From: gaobinlong Date: Thu, 18 Apr 2024 19:42:43 +0800 Subject: [PATCH 08/37] Update supported version in yaml test file for the primary_only parameter in force-merge API (#13279) * Update supported version for the primary_only parameter in force-merge API Signed-off-by: Gao Binlong * Update reason to make it consistent with 2.x Signed-off-by: Gao Binlong --------- Signed-off-by: Gao Binlong --- .../rest-api-spec/test/indices.forcemerge/10_basic.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.forcemerge/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.forcemerge/10_basic.yml index 39fb1604d9596..7410e020e1a91 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.forcemerge/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.forcemerge/10_basic.yml @@ -31,8 +31,8 @@ --- "Test primary_only parameter": - skip: - version: " - 2.99.99" - reason: "primary_only is available in 3.0+" + version: " - 2.12.99" + reason: "primary_only is available in 2.13.0+" - do: indices.create: From c4843c9bd792f1e2017319a4b20c12b1b3448378 Mon Sep 17 00:00:00 2001 From: Mohit Godwani <81609427+mgodwan@users.noreply.github.com> Date: Thu, 18 Apr 2024 18:25:01 +0530 Subject: [PATCH 09/37] Add release notes for 1.3.16 (#13280) Signed-off-by: mgodwan --- release-notes/opensearch.release-notes-1.3.16.md | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 release-notes/opensearch.release-notes-1.3.16.md diff --git a/release-notes/opensearch.release-notes-1.3.16.md b/release-notes/opensearch.release-notes-1.3.16.md new file mode 100644 index 0000000000000..a434e419d5780 --- /dev/null +++ b/release-notes/opensearch.release-notes-1.3.16.md @@ -0,0 +1,4 @@ +## 2024-04-18 Version 1.3.16 Release Notes + +### Upgrades +- Bump `netty` from 4.1.107.Final to 4.1.109.Final ([#12924](https://github.com/opensearch-project/OpenSearch/pull/12924), [#13233](https://github.com/opensearch-project/OpenSearch/pull/13233)) From ba25c234d91e0ab948e3387dc87785d8e99fba9e Mon Sep 17 00:00:00 2001 From: Peter Nied Date: Thu, 18 Apr 2024 10:00:44 -0500 Subject: [PATCH 10/37] Disabling breaking changes check on main (#13283) Signed-off-by: Peter Nied --- .github/workflows/detect-breaking-change.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/detect-breaking-change.yml b/.github/workflows/detect-breaking-change.yml index 1913d070e8c24..e5d3fddbd36f5 100644 --- a/.github/workflows/detect-breaking-change.yml +++ b/.github/workflows/detect-breaking-change.yml @@ -1,6 +1,8 @@ name: "Detect Breaking Changes" on: - pull_request + pull_request: + branches-ignore: + - main # This branch represents a to-be-released version of OpenSearch where breaking
changes are allowed jobs: detect-breaking-change: From f5c3ef9fa329df83083dc607ccdb74f5c65b3198 Mon Sep 17 00:00:00 2001 From: Vacha Shah Date: Thu, 18 Apr 2024 09:00:46 -0700 Subject: [PATCH 11/37] Replacing InboundMessage with NativeInboundMessage for deprecation (#13126) * Replacing InboundMessage with NativeInboundMessage for deprecation Signed-off-by: Vacha Shah * Removing InboundMessage class Signed-off-by: Vacha Shah --------- Signed-off-by: Vacha Shah --- .../transport/InboundAggregator.java | 7 +- .../opensearch/transport/InboundMessage.java | 108 ------------------ .../transport/NativeMessageHandler.java | 7 +- .../opensearch/transport/TcpTransport.java | 12 -- .../opensearch/transport/TransportLogger.java | 5 +- .../NativeInboundBytesHandler.java | 5 +- .../transport/InboundAggregatorTests.java | 13 ++- .../transport/InboundHandlerTests.java | 75 +++++++++--- .../transport/InboundPipelineTests.java | 3 +- .../transport/OutboundHandlerTests.java | 3 +- 10 files changed, 80 insertions(+), 158 deletions(-) delete mode 100644 server/src/main/java/org/opensearch/transport/InboundMessage.java diff --git a/server/src/main/java/org/opensearch/transport/InboundAggregator.java b/server/src/main/java/org/opensearch/transport/InboundAggregator.java index e894331f3b64e..f52875d880b4f 100644 --- a/server/src/main/java/org/opensearch/transport/InboundAggregator.java +++ b/server/src/main/java/org/opensearch/transport/InboundAggregator.java @@ -40,6 +40,7 @@ import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.bytes.CompositeBytesReference; +import org.opensearch.transport.nativeprotocol.NativeInboundMessage; import java.io.IOException; import java.util.ArrayList; @@ -113,7 +114,7 @@ public void aggregate(ReleasableBytesReference content) { } } - public InboundMessage finishAggregation() throws IOException { + public NativeInboundMessage finishAggregation() throws IOException { ensureOpen(); final ReleasableBytesReference releasableContent; if (isFirstContent()) { @@ -127,7 +128,7 @@ public InboundMessage finishAggregation() throws IOException { } final BreakerControl breakerControl = new BreakerControl(circuitBreaker); - final InboundMessage aggregated = new InboundMessage(currentHeader, releasableContent, breakerControl); + final NativeInboundMessage aggregated = new NativeInboundMessage(currentHeader, releasableContent, breakerControl); boolean success = false; try { if (aggregated.getHeader().needsToReadVariableHeader()) { @@ -142,7 +143,7 @@ public InboundMessage finishAggregation() throws IOException { if (isShortCircuited()) { aggregated.close(); success = true; - return new InboundMessage(aggregated.getHeader(), aggregationException); + return new NativeInboundMessage(aggregated.getHeader(), aggregationException); } else { success = true; return aggregated; diff --git a/server/src/main/java/org/opensearch/transport/InboundMessage.java b/server/src/main/java/org/opensearch/transport/InboundMessage.java deleted file mode 100644 index 5c68257557061..0000000000000 --- a/server/src/main/java/org/opensearch/transport/InboundMessage.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.transport; - -import org.opensearch.common.annotation.DeprecatedApi; -import org.opensearch.common.bytes.ReleasableBytesReference; -import org.opensearch.common.lease.Releasable; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.transport.nativeprotocol.NativeInboundMessage; - -import java.io.IOException; - -/** - * Inbound data as a message - * This api is deprecated, please use {@link org.opensearch.transport.nativeprotocol.NativeInboundMessage} instead. - * @opensearch.api - */ -@DeprecatedApi(since = "2.14.0") -public class InboundMessage implements Releasable, ProtocolInboundMessage { - - private final NativeInboundMessage nativeInboundMessage; - - public InboundMessage(Header header, ReleasableBytesReference content, Releasable breakerRelease) { - this.nativeInboundMessage = new NativeInboundMessage(header, content, breakerRelease); - } - - public InboundMessage(Header header, Exception exception) { - this.nativeInboundMessage = new NativeInboundMessage(header, exception); - } - - public InboundMessage(Header header, boolean isPing) { - this.nativeInboundMessage = new NativeInboundMessage(header, isPing); - } - - public Header getHeader() { - return this.nativeInboundMessage.getHeader(); - } - - public int getContentLength() { - return this.nativeInboundMessage.getContentLength(); - } - - public Exception getException() { - return this.nativeInboundMessage.getException(); - } - - public boolean isPing() { - return this.nativeInboundMessage.isPing(); - } - - public boolean isShortCircuit() { - return this.nativeInboundMessage.getException() != null; - } - - public Releasable takeBreakerReleaseControl() { - return this.nativeInboundMessage.takeBreakerReleaseControl(); - } - - public StreamInput openOrGetStreamInput() throws IOException { - return this.nativeInboundMessage.openOrGetStreamInput(); - } - - @Override - public void close() { - this.nativeInboundMessage.close(); - } - - @Override - public String toString() { - return this.nativeInboundMessage.toString(); - } - - @Override - public String getProtocol() { - return this.nativeInboundMessage.getProtocol(); - } - -} diff --git a/server/src/main/java/org/opensearch/transport/NativeMessageHandler.java b/server/src/main/java/org/opensearch/transport/NativeMessageHandler.java index 861b95a8098f2..c5b65f9eb7a11 100644 --- a/server/src/main/java/org/opensearch/transport/NativeMessageHandler.java +++ b/server/src/main/java/org/opensearch/transport/NativeMessageHandler.java @@ -51,6 +51,7 @@ import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.telemetry.tracing.channels.TraceableTcpTransportChannel; import org.opensearch.threadpool.ThreadPool; +import 
org.opensearch.transport.nativeprotocol.NativeInboundMessage; import java.io.EOFException; import java.io.IOException; @@ -111,7 +112,7 @@ public void messageReceived( long slowLogThresholdMs, TransportMessageListener messageListener ) throws IOException { - InboundMessage inboundMessage = (InboundMessage) message; + NativeInboundMessage inboundMessage = (NativeInboundMessage) message; TransportLogger.logInboundMessage(channel, inboundMessage); if (inboundMessage.isPing()) { keepAlive.receiveKeepAlive(channel); @@ -122,7 +123,7 @@ public void messageReceived( private void handleMessage( TcpChannel channel, - InboundMessage message, + NativeInboundMessage message, long startTime, long slowLogThresholdMs, TransportMessageListener messageListener @@ -194,7 +195,7 @@ private Map> extractHeaders(Map heade private void handleRequest( TcpChannel channel, Header header, - InboundMessage message, + NativeInboundMessage message, TransportMessageListener messageListener ) throws IOException { final String action = header.getActionName(); diff --git a/server/src/main/java/org/opensearch/transport/TcpTransport.java b/server/src/main/java/org/opensearch/transport/TcpTransport.java index e32bba5e836d3..8ba0178577232 100644 --- a/server/src/main/java/org/opensearch/transport/TcpTransport.java +++ b/server/src/main/java/org/opensearch/transport/TcpTransport.java @@ -761,18 +761,6 @@ protected void serverAcceptedChannel(TcpChannel channel) { */ protected abstract void stopInternal(); - /** - * @deprecated use {@link #inboundMessage(TcpChannel, ProtocolInboundMessage)} - * Handles inbound message that has been decoded. - * - * @param channel the channel the message is from - * @param message the message - */ - @Deprecated(since = "2.14.0", forRemoval = true) - public void inboundMessage(TcpChannel channel, InboundMessage message) { - inboundMessage(channel, (ProtocolInboundMessage) message); - } - /** * Handles inbound message that has been decoded. 
* diff --git a/server/src/main/java/org/opensearch/transport/TransportLogger.java b/server/src/main/java/org/opensearch/transport/TransportLogger.java index 997b3bb5ba18e..e780f643aafd7 100644 --- a/server/src/main/java/org/opensearch/transport/TransportLogger.java +++ b/server/src/main/java/org/opensearch/transport/TransportLogger.java @@ -40,6 +40,7 @@ import org.opensearch.core.common.io.stream.InputStreamStreamInput; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.compress.CompressorRegistry; +import org.opensearch.transport.nativeprotocol.NativeInboundMessage; import java.io.IOException; @@ -64,7 +65,7 @@ static void logInboundMessage(TcpChannel channel, BytesReference message) { } } - static void logInboundMessage(TcpChannel channel, InboundMessage message) { + static void logInboundMessage(TcpChannel channel, NativeInboundMessage message) { if (logger.isTraceEnabled()) { try { String logMessage = format(channel, message, "READ"); @@ -136,7 +137,7 @@ private static String format(TcpChannel channel, BytesReference message, String return sb.toString(); } - private static String format(TcpChannel channel, InboundMessage message, String event) throws IOException { + private static String format(TcpChannel channel, NativeInboundMessage message, String event) throws IOException { final StringBuilder sb = new StringBuilder(); sb.append(channel); diff --git a/server/src/main/java/org/opensearch/transport/nativeprotocol/NativeInboundBytesHandler.java b/server/src/main/java/org/opensearch/transport/nativeprotocol/NativeInboundBytesHandler.java index a8a4c0da7ec0f..97981aeb6736e 100644 --- a/server/src/main/java/org/opensearch/transport/nativeprotocol/NativeInboundBytesHandler.java +++ b/server/src/main/java/org/opensearch/transport/nativeprotocol/NativeInboundBytesHandler.java @@ -16,7 +16,6 @@ import org.opensearch.transport.InboundAggregator; import org.opensearch.transport.InboundBytesHandler; import org.opensearch.transport.InboundDecoder; -import org.opensearch.transport.InboundMessage; import org.opensearch.transport.ProtocolInboundMessage; import org.opensearch.transport.StatsTracker; import org.opensearch.transport.TcpChannel; @@ -32,7 +31,7 @@ public class NativeInboundBytesHandler implements InboundBytesHandler { private static final ThreadLocal> fragmentList = ThreadLocal.withInitial(ArrayList::new); - private static final InboundMessage PING_MESSAGE = new InboundMessage(null, true); + private static final NativeInboundMessage PING_MESSAGE = new NativeInboundMessage(null, true); private final ArrayDeque pending; private final InboundDecoder decoder; @@ -152,7 +151,7 @@ private void forwardFragments( messageHandler.accept(channel, PING_MESSAGE); } else if (fragment == InboundDecoder.END_CONTENT) { assert aggregator.isAggregating(); - try (InboundMessage aggregated = aggregator.finishAggregation()) { + try (NativeInboundMessage aggregated = aggregator.finishAggregation()) { statsTracker.markMessageReceived(); messageHandler.accept(channel, aggregated); } diff --git a/server/src/test/java/org/opensearch/transport/InboundAggregatorTests.java b/server/src/test/java/org/opensearch/transport/InboundAggregatorTests.java index 2dd98a8efe2a3..4ac78366360d7 100644 --- a/server/src/test/java/org/opensearch/transport/InboundAggregatorTests.java +++ b/server/src/test/java/org/opensearch/transport/InboundAggregatorTests.java @@ -42,6 +42,7 @@ import org.opensearch.core.common.breaker.CircuitBreakingException; import org.opensearch.core.common.bytes.BytesArray; 
import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.transport.nativeprotocol.NativeInboundMessage; import org.junit.Before; import java.io.IOException; @@ -107,7 +108,7 @@ public void testInboundAggregation() throws IOException { } // Signal EOS - InboundMessage aggregated = aggregator.finishAggregation(); + NativeInboundMessage aggregated = aggregator.finishAggregation(); assertThat(aggregated, notNullValue()); assertFalse(aggregated.isPing()); @@ -138,7 +139,7 @@ public void testInboundUnknownAction() throws IOException { assertEquals(0, content.refCount()); // Signal EOS - InboundMessage aggregated = aggregator.finishAggregation(); + NativeInboundMessage aggregated = aggregator.finishAggregation(); assertThat(aggregated, notNullValue()); assertTrue(aggregated.isShortCircuit()); @@ -161,7 +162,7 @@ public void testCircuitBreak() throws IOException { content1.close(); // Signal EOS - InboundMessage aggregated1 = aggregator.finishAggregation(); + NativeInboundMessage aggregated1 = aggregator.finishAggregation(); assertEquals(0, content1.refCount()); assertThat(aggregated1, notNullValue()); @@ -180,7 +181,7 @@ public void testCircuitBreak() throws IOException { content2.close(); // Signal EOS - InboundMessage aggregated2 = aggregator.finishAggregation(); + NativeInboundMessage aggregated2 = aggregator.finishAggregation(); assertEquals(1, content2.refCount()); assertThat(aggregated2, notNullValue()); @@ -199,7 +200,7 @@ public void testCircuitBreak() throws IOException { content3.close(); // Signal EOS - InboundMessage aggregated3 = aggregator.finishAggregation(); + NativeInboundMessage aggregated3 = aggregator.finishAggregation(); assertEquals(1, content3.refCount()); assertThat(aggregated3, notNullValue()); @@ -263,7 +264,7 @@ public void testFinishAggregationWillFinishHeader() throws IOException { content.close(); // Signal EOS - InboundMessage aggregated = aggregator.finishAggregation(); + NativeInboundMessage aggregated = aggregator.finishAggregation(); assertThat(aggregated, notNullValue()); assertFalse(header.needsToReadVariableHeader()); diff --git a/server/src/test/java/org/opensearch/transport/InboundHandlerTests.java b/server/src/test/java/org/opensearch/transport/InboundHandlerTests.java index 0d171e17e70e1..2dde27d62e759 100644 --- a/server/src/test/java/org/opensearch/transport/InboundHandlerTests.java +++ b/server/src/test/java/org/opensearch/transport/InboundHandlerTests.java @@ -56,6 +56,7 @@ import org.opensearch.test.VersionUtils; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.nativeprotocol.NativeInboundMessage; import org.junit.After; import org.junit.Before; @@ -142,7 +143,7 @@ public void testPing() throws Exception { ); requestHandlers.registerHandler(registry); - handler.inboundMessage(channel, new InboundMessage(null, true)); + handler.inboundMessage(channel, new NativeInboundMessage(null, true)); if (channel.isServerChannel()) { BytesReference ping = channel.getMessageCaptor().get(); assertEquals('E', ping.get(0)); @@ -208,7 +209,11 @@ public TestResponse read(StreamInput in) throws IOException { BytesReference fullRequestBytes = request.serialize(new BytesStreamOutput()); BytesReference requestContent = fullRequestBytes.slice(headerSize, fullRequestBytes.length() - headerSize); Header requestHeader = new Header(fullRequestBytes.length() - 6, requestId, TransportStatus.setRequest((byte) 0), version); - InboundMessage requestMessage = new InboundMessage(requestHeader, 
ReleasableBytesReference.wrap(requestContent), () -> {}); + NativeInboundMessage requestMessage = new NativeInboundMessage( + requestHeader, + ReleasableBytesReference.wrap(requestContent), + () -> {} + ); requestHeader.finishParsingHeader(requestMessage.openOrGetStreamInput()); handler.inboundMessage(channel, requestMessage); @@ -229,7 +234,11 @@ public TestResponse read(StreamInput in) throws IOException { BytesReference fullResponseBytes = channel.getMessageCaptor().get(); BytesReference responseContent = fullResponseBytes.slice(headerSize, fullResponseBytes.length() - headerSize); Header responseHeader = new Header(fullResponseBytes.length() - 6, requestId, responseStatus, version); - InboundMessage responseMessage = new InboundMessage(responseHeader, ReleasableBytesReference.wrap(responseContent), () -> {}); + NativeInboundMessage responseMessage = new NativeInboundMessage( + responseHeader, + ReleasableBytesReference.wrap(responseContent), + () -> {} + ); responseHeader.finishParsingHeader(responseMessage.openOrGetStreamInput()); handler.inboundMessage(channel, responseMessage); @@ -256,7 +265,7 @@ public void testSendsErrorResponseToHandshakeFromCompatibleVersion() throws Exce TransportStatus.setRequest(TransportStatus.setHandshake((byte) 0)), remoteVersion ); - final InboundMessage requestMessage = unreadableInboundHandshake(remoteVersion, requestHeader); + final NativeInboundMessage requestMessage = unreadableInboundHandshake(remoteVersion, requestHeader); requestHeader.actionName = TransportHandshaker.HANDSHAKE_ACTION_NAME; requestHeader.headers = Tuple.tuple(Map.of(), Map.of()); requestHeader.features = Set.of(); @@ -296,7 +305,7 @@ public void testClosesChannelOnErrorInHandshakeWithIncompatibleVersion() throws TransportStatus.setRequest(TransportStatus.setHandshake((byte) 0)), remoteVersion ); - final InboundMessage requestMessage = unreadableInboundHandshake(remoteVersion, requestHeader); + final NativeInboundMessage requestMessage = unreadableInboundHandshake(remoteVersion, requestHeader); requestHeader.actionName = TransportHandshaker.HANDSHAKE_ACTION_NAME; requestHeader.headers = Tuple.tuple(Map.of(), Map.of()); requestHeader.features = Set.of(); @@ -327,13 +336,17 @@ public void testLogsSlowInboundProcessing() throws Exception { TransportStatus.setRequest(TransportStatus.setHandshake((byte) 0)), remoteVersion ); - final InboundMessage requestMessage = new InboundMessage(requestHeader, ReleasableBytesReference.wrap(BytesArray.EMPTY), () -> { - try { - TimeUnit.SECONDS.sleep(1L); - } catch (InterruptedException e) { - throw new AssertionError(e); + final NativeInboundMessage requestMessage = new NativeInboundMessage( + requestHeader, + ReleasableBytesReference.wrap(BytesArray.EMPTY), + () -> { + try { + TimeUnit.SECONDS.sleep(1L); + } catch (InterruptedException e) { + throw new AssertionError(e); + } } - }); + ); requestHeader.actionName = TransportHandshaker.HANDSHAKE_ACTION_NAME; requestHeader.headers = Tuple.tuple(Collections.emptyMap(), Collections.emptyMap()); requestHeader.features = Set.of(); @@ -407,7 +420,11 @@ public void onResponseSent(long requestId, String action, Exception error) { BytesReference fullRequestBytes = BytesReference.fromByteBuffer((ByteBuffer) buffer.flip()); BytesReference requestContent = fullRequestBytes.slice(headerSize, fullRequestBytes.length() - headerSize); Header requestHeader = new Header(fullRequestBytes.length() - 6, requestId, TransportStatus.setRequest((byte) 0), version); - InboundMessage requestMessage = new 
InboundMessage(requestHeader, ReleasableBytesReference.wrap(requestContent), () -> {}); + NativeInboundMessage requestMessage = new NativeInboundMessage( + requestHeader, + ReleasableBytesReference.wrap(requestContent), + () -> {} + ); requestHeader.finishParsingHeader(requestMessage.openOrGetStreamInput()); handler.inboundMessage(channel, requestMessage); @@ -474,7 +491,11 @@ public void onResponseSent(long requestId, String action, Exception error) { // Create the request payload by intentionally stripping 1 byte away BytesReference requestContent = fullRequestBytes.slice(headerSize, fullRequestBytes.length() - headerSize - 1); Header requestHeader = new Header(fullRequestBytes.length() - 6, requestId, TransportStatus.setRequest((byte) 0), version); - InboundMessage requestMessage = new InboundMessage(requestHeader, ReleasableBytesReference.wrap(requestContent), () -> {}); + NativeInboundMessage requestMessage = new NativeInboundMessage( + requestHeader, + ReleasableBytesReference.wrap(requestContent), + () -> {} + ); requestHeader.finishParsingHeader(requestMessage.openOrGetStreamInput()); handler.inboundMessage(channel, requestMessage); @@ -540,7 +561,11 @@ public TestResponse read(StreamInput in) throws IOException { BytesReference fullRequestBytes = request.serialize(new BytesStreamOutput()); BytesReference requestContent = fullRequestBytes.slice(headerSize, fullRequestBytes.length() - headerSize); Header requestHeader = new Header(fullRequestBytes.length() - 6, requestId, TransportStatus.setRequest((byte) 0), version); - InboundMessage requestMessage = new InboundMessage(requestHeader, ReleasableBytesReference.wrap(requestContent), () -> {}); + NativeInboundMessage requestMessage = new NativeInboundMessage( + requestHeader, + ReleasableBytesReference.wrap(requestContent), + () -> {} + ); requestHeader.finishParsingHeader(requestMessage.openOrGetStreamInput()); handler.inboundMessage(channel, requestMessage); @@ -562,7 +587,11 @@ public TestResponse read(StreamInput in) throws IOException { BytesReference fullResponseBytes = BytesReference.fromByteBuffer((ByteBuffer) buffer.flip()); BytesReference responseContent = fullResponseBytes.slice(headerSize, fullResponseBytes.length() - headerSize); Header responseHeader = new Header(fullResponseBytes.length() - 6, requestId, responseStatus, version); - InboundMessage responseMessage = new InboundMessage(responseHeader, ReleasableBytesReference.wrap(responseContent), () -> {}); + NativeInboundMessage responseMessage = new NativeInboundMessage( + responseHeader, + ReleasableBytesReference.wrap(responseContent), + () -> {} + ); responseHeader.finishParsingHeader(responseMessage.openOrGetStreamInput()); handler.inboundMessage(channel, responseMessage); @@ -628,7 +657,11 @@ public TestResponse read(StreamInput in) throws IOException { BytesReference fullRequestBytes = request.serialize(new BytesStreamOutput()); BytesReference requestContent = fullRequestBytes.slice(headerSize, fullRequestBytes.length() - headerSize); Header requestHeader = new Header(fullRequestBytes.length() - 6, requestId, TransportStatus.setRequest((byte) 0), version); - InboundMessage requestMessage = new InboundMessage(requestHeader, ReleasableBytesReference.wrap(requestContent), () -> {}); + NativeInboundMessage requestMessage = new NativeInboundMessage( + requestHeader, + ReleasableBytesReference.wrap(requestContent), + () -> {} + ); requestHeader.finishParsingHeader(requestMessage.openOrGetStreamInput()); handler.inboundMessage(channel, requestMessage); @@ -645,7 +678,11 
@@ public TestResponse read(StreamInput in) throws IOException { // Create the response payload by intentionally stripping 1 byte away BytesReference responseContent = fullResponseBytes.slice(headerSize, fullResponseBytes.length() - headerSize - 1); Header responseHeader = new Header(fullResponseBytes.length() - 6, requestId, responseStatus, version); - InboundMessage responseMessage = new InboundMessage(responseHeader, ReleasableBytesReference.wrap(responseContent), () -> {}); + NativeInboundMessage responseMessage = new NativeInboundMessage( + responseHeader, + ReleasableBytesReference.wrap(responseContent), + () -> {} + ); responseHeader.finishParsingHeader(responseMessage.openOrGetStreamInput()); handler.inboundMessage(channel, responseMessage); @@ -654,8 +691,8 @@ public TestResponse read(StreamInput in) throws IOException { assertThat(exceptionCaptor.get().getMessage(), containsString("Failed to deserialize response from handler")); } - private static InboundMessage unreadableInboundHandshake(Version remoteVersion, Header requestHeader) { - return new InboundMessage(requestHeader, ReleasableBytesReference.wrap(BytesArray.EMPTY), () -> {}) { + private static NativeInboundMessage unreadableInboundHandshake(Version remoteVersion, Header requestHeader) { + return new NativeInboundMessage(requestHeader, ReleasableBytesReference.wrap(BytesArray.EMPTY), () -> {}) { @Override public StreamInput openOrGetStreamInput() { final StreamInput streamInput = new InputStreamStreamInput(new InputStream() { diff --git a/server/src/test/java/org/opensearch/transport/InboundPipelineTests.java b/server/src/test/java/org/opensearch/transport/InboundPipelineTests.java index 2dfe8a0dd8590..d54f7e6fd2c2b 100644 --- a/server/src/test/java/org/opensearch/transport/InboundPipelineTests.java +++ b/server/src/test/java/org/opensearch/transport/InboundPipelineTests.java @@ -49,6 +49,7 @@ import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.transport.nativeprotocol.NativeInboundMessage; import java.io.IOException; import java.util.ArrayList; @@ -74,7 +75,7 @@ public void testPipelineHandling() throws IOException { final List toRelease = new ArrayList<>(); final BiConsumer messageHandler = (c, m) -> { try { - InboundMessage message = (InboundMessage) m; + NativeInboundMessage message = (NativeInboundMessage) m; final Header header = message.getHeader(); final MessageData actualData; final Version version = header.getVersion(); diff --git a/server/src/test/java/org/opensearch/transport/OutboundHandlerTests.java b/server/src/test/java/org/opensearch/transport/OutboundHandlerTests.java index 36ba409a2de03..ad7d4401af13c 100644 --- a/server/src/test/java/org/opensearch/transport/OutboundHandlerTests.java +++ b/server/src/test/java/org/opensearch/transport/OutboundHandlerTests.java @@ -53,6 +53,7 @@ import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.nativeprotocol.NativeInboundMessage; import org.junit.After; import org.junit.Before; @@ -97,7 +98,7 @@ public void setUp() throws Exception { final InboundAggregator aggregator = new InboundAggregator(breaker, (Predicate) action -> true); pipeline = new InboundPipeline(statsTracker, millisSupplier, decoder, aggregator, (c, m) -> { try (BytesStreamOutput streamOutput = new BytesStreamOutput()) { - InboundMessage m1 = (InboundMessage) 
m; + NativeInboundMessage m1 = (NativeInboundMessage) m; Streams.copy(m1.openOrGetStreamInput(), streamOutput); message.set(new Tuple<>(m1.getHeader(), streamOutput.bytes())); } catch (IOException e) { From 0282e64bb25a8485526a1b6bf584ac2b9495f219 Mon Sep 17 00:00:00 2001 From: Daniel Widdis Date: Thu, 18 Apr 2024 23:05:39 -0700 Subject: [PATCH 12/37] Ignore BaseRestHandler unconsumed content check as it's always consumed (#13290) * Ignore BaseRestHandler unconsumed content check as it's always consumed Signed-off-by: Daniel Widdis * Remove comment, continue to ignore content on Force Merge Signed-off-by: Daniel Widdis * Remove no-body test from RestDeletePitActionTests Signed-off-by: Daniel Widdis --------- Signed-off-by: Daniel Widdis --- CHANGELOG.md | 1 + .../org/opensearch/rest/BaseRestHandler.java | 4 - .../forcemerge/RestForceMergeActionTests.java | 22 ------ .../opensearch/rest/BaseRestHandlerTests.java | 79 ------------------- .../search/pit/RestDeletePitActionTests.java | 25 ------ 5 files changed, 1 insertion(+), 130 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5efcfee3d9d9d..6d8af16db72a7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -59,6 +59,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Enabled mockTelemetryPlugin for IT and fixed OOM issues ([#13054](https://github.com/opensearch-project/OpenSearch/pull/13054)) - Fix implement mark() and markSupported() in class FilterStreamInput ([#13098](https://github.com/opensearch-project/OpenSearch/pull/13098)) - Fix snapshot _status API to return correct status for partial snapshots ([#12812](https://github.com/opensearch-project/OpenSearch/pull/12812)) +- Ignore BaseRestHandler unconsumed content check as it's always consumed. ([#13290](https://github.com/opensearch-project/OpenSearch/pull/13290)) ### Security diff --git a/server/src/main/java/org/opensearch/rest/BaseRestHandler.java b/server/src/main/java/org/opensearch/rest/BaseRestHandler.java index 3552e32022b2c..fc150405747ec 100644 --- a/server/src/main/java/org/opensearch/rest/BaseRestHandler.java +++ b/server/src/main/java/org/opensearch/rest/BaseRestHandler.java @@ -122,10 +122,6 @@ public final void handleRequest(RestRequest request, RestChannel channel, NodeCl throw new IllegalArgumentException(unrecognized(request, unconsumedParams, candidateParams, "parameter")); } - if (request.hasContent() && request.isContentConsumed() == false) { - throw new IllegalArgumentException("request [" + request.method() + " " + request.path() + "] does not support having a body"); - } - usageCount.increment(); // execute the action action.accept(channel); diff --git a/server/src/test/java/org/opensearch/action/admin/indices/forcemerge/RestForceMergeActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/forcemerge/RestForceMergeActionTests.java index b09e592922ed9..01d72b78a679e 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/forcemerge/RestForceMergeActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/forcemerge/RestForceMergeActionTests.java @@ -32,14 +32,9 @@ package org.opensearch.action.admin.indices.forcemerge; -import org.opensearch.client.node.NodeClient; -import org.opensearch.common.xcontent.json.JsonXContent; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.rest.RestRequest; import 
org.opensearch.rest.action.admin.indices.RestForceMergeAction; -import org.opensearch.test.rest.FakeRestChannel; import org.opensearch.test.rest.FakeRestRequest; import org.opensearch.test.rest.RestActionTestCase; import org.junit.Before; @@ -47,9 +42,6 @@ import java.util.HashMap; import java.util.Map; -import static org.hamcrest.Matchers.equalTo; -import static org.mockito.Mockito.mock; - public class RestForceMergeActionTests extends RestActionTestCase { @Before @@ -57,20 +49,6 @@ public void setUpAction() { controller().registerHandler(new RestForceMergeAction()); } - public void testBodyRejection() throws Exception { - final RestForceMergeAction handler = new RestForceMergeAction(); - String json = JsonXContent.contentBuilder().startObject().field("max_num_segments", 1).endObject().toString(); - final FakeRestRequest request = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).withContent( - new BytesArray(json), - MediaTypeRegistry.JSON - ).withPath("/_forcemerge").build(); - IllegalArgumentException e = expectThrows( - IllegalArgumentException.class, - () -> handler.handleRequest(request, new FakeRestChannel(request, randomBoolean(), 1), mock(NodeClient.class)) - ); - assertThat(e.getMessage(), equalTo("request [GET /_forcemerge] does not support having a body")); - } - public void testDeprecationMessage() { final Map params = new HashMap<>(); params.put("only_expunge_deletes", Boolean.TRUE.toString()); diff --git a/server/src/test/java/org/opensearch/rest/BaseRestHandlerTests.java b/server/src/test/java/org/opensearch/rest/BaseRestHandlerTests.java index ce929e64d8960..45653e9d8e4d6 100644 --- a/server/src/test/java/org/opensearch/rest/BaseRestHandlerTests.java +++ b/server/src/test/java/org/opensearch/rest/BaseRestHandlerTests.java @@ -35,10 +35,6 @@ import org.opensearch.client.node.NodeClient; import org.opensearch.common.Table; import org.opensearch.common.settings.Settings; -import org.opensearch.common.xcontent.json.JsonXContent; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.xcontent.MediaTypeRegistry; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.rest.RestHandler.ReplacedRoute; import org.opensearch.rest.RestHandler.Route; import org.opensearch.rest.RestRequest.Method; @@ -281,81 +277,6 @@ public String getName() { assertTrue(executed.get()); } - public void testConsumedBody() throws Exception { - final AtomicBoolean executed = new AtomicBoolean(); - final BaseRestHandler handler = new BaseRestHandler() { - @Override - protected RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - request.content(); - return channel -> executed.set(true); - } - - @Override - public String getName() { - return "test_consumed_body"; - } - }; - - try (XContentBuilder builder = JsonXContent.contentBuilder().startObject().endObject()) { - final RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withContent( - new BytesArray(builder.toString()), - MediaTypeRegistry.JSON - ).build(); - final RestChannel channel = new FakeRestChannel(request, randomBoolean(), 1); - handler.handleRequest(request, channel, mockClient); - assertTrue(executed.get()); - } - } - - public void testUnconsumedNoBody() throws Exception { - final AtomicBoolean executed = new AtomicBoolean(); - final BaseRestHandler handler = new BaseRestHandler() { - @Override - protected RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - 
return channel -> executed.set(true); - } - - @Override - public String getName() { - return "test_unconsumed_body"; - } - }; - - final RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).build(); - final RestChannel channel = new FakeRestChannel(request, randomBoolean(), 1); - handler.handleRequest(request, channel, mockClient); - assertTrue(executed.get()); - } - - public void testUnconsumedBody() throws IOException { - final AtomicBoolean executed = new AtomicBoolean(); - final BaseRestHandler handler = new BaseRestHandler() { - @Override - protected RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - return channel -> executed.set(true); - } - - @Override - public String getName() { - return "test_unconsumed_body"; - } - }; - - try (XContentBuilder builder = JsonXContent.contentBuilder().startObject().endObject()) { - final RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withContent( - new BytesArray(builder.toString()), - MediaTypeRegistry.JSON - ).build(); - final RestChannel channel = new FakeRestChannel(request, randomBoolean(), 1); - final IllegalArgumentException e = expectThrows( - IllegalArgumentException.class, - () -> handler.handleRequest(request, channel, mockClient) - ); - assertThat(e, hasToString(containsString("request [GET /] does not support having a body"))); - assertFalse(executed.get()); - } - } - public void testReplaceRoutesMethod() throws Exception { List routes = Arrays.asList(new Route(Method.GET, "/path/test"), new Route(Method.PUT, "/path2/test")); List replacedRoutes = RestHandler.replaceRoutes(routes, "/prefix", "/deprecatedPrefix"); diff --git a/server/src/test/java/org/opensearch/search/pit/RestDeletePitActionTests.java b/server/src/test/java/org/opensearch/search/pit/RestDeletePitActionTests.java index b60541825e3ed..448ba9e5a8cd7 100644 --- a/server/src/test/java/org/opensearch/search/pit/RestDeletePitActionTests.java +++ b/server/src/test/java/org/opensearch/search/pit/RestDeletePitActionTests.java @@ -82,31 +82,6 @@ public void deletePits(DeletePitRequest request, ActionListener pitCalled = new SetOnce<>(); - try (NodeClient nodeClient = new NoOpNodeClient(this.getTestName()) { - @Override - public void deletePits(DeletePitRequest request, ActionListener listener) { - pitCalled.set(true); - assertThat(request.getPitIds(), hasSize(1)); - assertThat(request.getPitIds().get(0), equalTo("_all")); - } - }) { - RestDeletePitAction action = new RestDeletePitAction(); - RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withContent( - new BytesArray("{\"pit_id\": [\"BODY\"]}"), - MediaTypeRegistry.JSON - ).withPath("/_all").build(); - FakeRestChannel channel = new FakeRestChannel(request, false, 0); - - IllegalArgumentException ex = expectThrows( - IllegalArgumentException.class, - () -> action.handleRequest(request, channel, nodeClient) - ); - assertTrue(ex.getMessage().contains("request [GET /_all] does not support having a body")); - } - } - public void testDeletePitQueryStringParamsShouldThrowException() { SetOnce pitCalled = new SetOnce<>(); try (NodeClient nodeClient = new NoOpNodeClient(this.getTestName()) { From b4692c897ec4b19161417147d29b116671616fe1 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Fri, 19 Apr 2024 11:03:06 -0400 Subject: [PATCH 13/37] Reconsider the breaking changes check policy to detect breaking changes against released versions (#13292) * Reconsider the breaking changes check policy to detect breaking changes 
against released versions Signed-off-by: Andriy Redko * Add an ability to provide the target version to compare with for japicmp tasks Signed-off-by: Andriy Redko * Add documentation for japicmp tasks Signed-off-by: Andriy Redko --------- Signed-off-by: Andriy Redko --- CHANGELOG.md | 1 + DEVELOPER_GUIDE.md | 15 +++++++++++ server/build.gradle | 65 ++++++++++++++++++++++++++++----------------- 3 files changed, 56 insertions(+), 25 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6d8af16db72a7..dadfcbfbd8b05 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -45,6 +45,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Improve built-in secure transports support ([#12907](https://github.com/opensearch-project/OpenSearch/pull/12907)) - Update links to documentation in rest-api-spec ([#13043](https://github.com/opensearch-project/OpenSearch/pull/13043)) - Refactoring globMatch using simpleMatchWithNormalizedStrings from Regex ([#13104](https://github.com/opensearch-project/OpenSearch/pull/13104)) +- [BWC and API enforcement] Reconsider the breaking changes check policy to detect breaking changes against released versions ([#13292](https://github.com/opensearch-project/OpenSearch/pull/13292)) ### Deprecated diff --git a/DEVELOPER_GUIDE.md b/DEVELOPER_GUIDE.md index f0851fc58d444..92ef71b92da7e 100644 --- a/DEVELOPER_GUIDE.md +++ b/DEVELOPER_GUIDE.md @@ -57,6 +57,7 @@ - [Developer API](#developer-api) - [User API](#user-api) - [Experimental Development](#experimental-development) + - [API Compatibility Checks](#api-compatibility-checks) - [Backports](#backports) - [LineLint](#linelint) - [Lucene Snapshots](#lucene-snapshots) @@ -607,6 +608,20 @@ a LTS feature but with additional guard rails and communication mechanisms to si release, or be removed altogether. Any Developer or User APIs implemented along with the experimental feature should be marked with `@ExperimentalApi` (or documented as `@opensearch.experimental`) annotation to signal the implementation is not subject to LTS and does not follow backwards compatibility guidelines. +#### API Compatibility Checks + +The compatibility checks for public APIs are performed using [japicmp](https://siom79.github.io/japicmp/) and are available as separate Gradle tasks (those are run on demand at the moment): + +``` +./gradlew japicmp +``` + +By default, the API compatibility checks are run against the latest released version of OpenSearch; however, the target version to compare to can be provided using a system property during the build, e.g.: + +``` +./gradlew japicmp -Djapicmp.compare.version=2.14.0-SNAPSHOT +``` + ### Backports The Github workflow in [`backport.yml`](.github/workflows/backport.yml) creates backport PRs automatically when the original PR with an appropriate label `backport <backport-branch-name>` is merged to main with the backport workflow run successfully on the PR. For example, if a PR on main needs to be backported to `1.x` branch, add a label `backport 1.x` to the PR and make sure the backport workflow runs on the PR along with other checks. Once this PR is merged to main, the workflow will create a backport PR to the `1.x` branch.
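[Illustration only, not part of the patch] The build script changes that follow make japicmp default to the newest released version by reading Maven Central's maven-metadata.xml. A minimal standalone Groovy sketch of that lookup, assuming network access to Maven Central and using a hand-rolled numeric comparator in place of the org.opensearch.gradle.Version ordering that the build script itself uses:

// Sketch: resolve the latest released (non-SNAPSHOT) OpenSearch version from
// Maven Central metadata, mirroring the japicmpCompareTarget fallback below.
def latest = new URL('https://repo1.maven.org/maven2/org/opensearch/opensearch/maven-metadata.xml')
    .openStream().withStream { s ->
        new XmlParser().parse(s)
            .versioning.versions.version
            .collect { it.text() }
            .findAll { it ==~ /\d+\.\d+\.\d+/ }   // keep released x.y.z versions only
            .toSorted { a, b ->
                // compare version components numerically, not lexicographically
                def (x, y) = [a, b].collect { it.tokenize('.')*.toInteger() }
                x[0] <=> y[0] ?: x[1] <=> y[1] ?: x[2] <=> y[2]
            }
            .last()
    }
println "japicmp would compare the current build against ${latest}"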
diff --git a/server/build.gradle b/server/build.gradle index a076a6bee36bf..fc383f940991c 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -173,6 +173,22 @@ tasks.named("testingConventions").configure { } } +// Set to current version by default +def japicmpCompareTarget = System.getProperty("japicmp.compare.version") +if (japicmpCompareTarget == null) { /* use latest released version */ + // Read the list from maven central. + // Fetch the metadata and parse the xml into Version instances, pick the latest one + japicmpCompareTarget = new URL('https://repo1.maven.org/maven2/org/opensearch/opensearch/maven-metadata.xml').openStream().withStream { s -> + new XmlParser().parse(s) + .versioning.versions.version + .collect { it.text() }.findAll { it ==~ /\d+\.\d+\.\d+/ } + .collect { org.opensearch.gradle.Version.fromString(it) } + .toSorted() + .last() + .toString() + } +} + def generateModulesList = tasks.register("generateModulesList") { List<String> modules = project(':modules').subprojects.collect { it.name } File modulesFile = new File(buildDir, 'generated-resources/modules.txt') @@ -380,9 +396,10 @@ tasks.named("sourcesJar").configure { } } -/** Compares the current build against a snapshot build */ +/** Compares the current build against the latest released version or the version supplied through the 'japicmp.compare.version' system property */ tasks.register("japicmp", me.champeau.gradle.japicmp.JapicmpTask) { - oldClasspath.from(files("${buildDir}/snapshot/opensearch-${version}.jar")) + logger.info("Comparing public APIs from ${version} to ${japicmpCompareTarget}") + oldClasspath.from(files("${buildDir}/japicmp-target/opensearch-${japicmpCompareTarget}.jar")) newClasspath.from(tasks.named('jar')) onlyModified = true failOnModification = true @@ -390,50 +407,48 @@ tasks.register("japicmp", me.champeau.gradle.japicmp.JapicmpTask) { annotationIncludes = ['@org.opensearch.common.annotation.PublicApi', '@org.opensearch.common.annotation.DeprecatedApi'] txtOutputFile = layout.buildDirectory.file("reports/java-compatibility/report.txt") htmlOutputFile = layout.buildDirectory.file("reports/java-compatibility/report.html") - dependsOn downloadSnapshot + dependsOn downloadJapicmpCompareTarget } /** If the Java API Comparison task failed, print a hint if the change should be merged from its target branch */ gradle.taskGraph.afterTask { Task task, TaskState state -> if (task.name == 'japicmp' && state.failure != null) { - def sha = getGitShaFromJar("${buildDir}/snapshot/opensearch-${version}.jar") - logger.info("Incompatiable java api from snapshot jar built off of commit ${sha}") - - if (!inHistory(sha)) { - logger.warn('\u001B[33mPlease merge from the target branch and run this task again.\u001B[0m') - } + logger.info("Public API changes incompatible with the ${japicmpCompareTarget} target have been detected") } } -/** Downloads latest snapshot from maven repository */ -tasks.register("downloadSnapshot", Copy) { +/** Downloads latest released version from maven repository */ +tasks.register("downloadJapicmpCompareTarget", Copy) { def mavenSnapshotRepoUrl = "https://aws.oss.sonatype.org/content/repositories/snapshots/" def groupId = "org.opensearch" def artifactId = "opensearch" - def repos = project.getRepositories(); - MavenArtifactRepository opensearchRepo = repos.maven(repo -> { - repo.setName("opensearch-snapshots"); - repo.setUrl(mavenSnapshotRepoUrl); - }); - - repos.exclusiveContent(exclusiveRepo -> { - exclusiveRepo.filter(descriptor -> descriptor.includeGroup(groupId)); -
exclusiveRepo.forRepositories(opensearchRepo); - }); + // Add repository for snapshot artifacts if japicmp compare target version is snapshot + if (japicmpCompareTarget.endsWith("-SNAPSHOT")) { + def repos = project.getRepositories(); + MavenArtifactRepository opensearchRepo = repos.maven(repo -> { + repo.setName("opensearch-snapshots"); + repo.setUrl(mavenSnapshotRepoUrl); + }); + + repos.exclusiveContent(exclusiveRepo -> { + exclusiveRepo.filter(descriptor -> descriptor.includeGroup(groupId)); + exclusiveRepo.forRepositories(opensearchRepo); + }); + } configurations { - snapshotArtifact { + japicmpCompareTargetArtifact { exclude group: 'org.apache.lucene' } } dependencies { - snapshotArtifact("${groupId}:${artifactId}:${version}:") + japicmpCompareTargetArtifact("${groupId}:${artifactId}:${japicmpCompareTarget}:") } - from configurations.snapshotArtifact - into "$buildDir/snapshot" + from configurations.japicmpCompareTargetArtifact + into "$buildDir/japicmp-target" } /** Check if the sha is in the current history */ From bfbdc9f43ebfcd55c0b2a5a96d9e2a73bd66ff04 Mon Sep 17 00:00:00 2001 From: Rishabh Maurya Date: Sun, 21 Apr 2024 12:32:47 -0700 Subject: [PATCH 14/37] Fix the flaky test for derived fields highlighter test (#13313) Signed-off-by: Rishabh Maurya --- .../DerivedFieldFetchAndHighlightTests.java | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/server/src/test/java/org/opensearch/search/fetch/subphase/highlight/DerivedFieldFetchAndHighlightTests.java b/server/src/test/java/org/opensearch/search/fetch/subphase/highlight/DerivedFieldFetchAndHighlightTests.java index 28d97c74d9445..92127da9654aa 100644 --- a/server/src/test/java/org/opensearch/search/fetch/subphase/highlight/DerivedFieldFetchAndHighlightTests.java +++ b/server/src/test/java/org/opensearch/search/fetch/subphase/highlight/DerivedFieldFetchAndHighlightTests.java @@ -144,17 +144,17 @@ public void testDerivedFieldFromIndexMapping() throws IOException { // create a fetch context to be used by HighlightPhase processor FetchContext fetchContext = mock(FetchContext.class); - when(fetchContext.mapperService()).thenReturn(mockShardContext.getMapperService()); + when(fetchContext.mapperService()).thenReturn(mapperService); when(fetchContext.getQueryShardContext()).thenReturn(mockShardContext); when(fetchContext.getIndexSettings()).thenReturn(indexService.getIndexSettings()); when(fetchContext.searcher()).thenReturn( new ContextIndexSearcher( - searcher.getIndexReader(), - searcher.getSimilarity(), - searcher.getQueryCache(), - searcher.getQueryCachingPolicy(), + reader, + IndexSearcher.getDefaultSimilarity(), + IndexSearcher.getDefaultQueryCache(), + IndexSearcher.getDefaultQueryCachingPolicy(), true, - searcher.getExecutor(), + null, null ) ); @@ -253,17 +253,17 @@ public void testDerivedFieldFromSearchMapping() throws IOException { // create a fetch context to be used by HighlightPhase processor FetchContext fetchContext = mock(FetchContext.class); - when(fetchContext.mapperService()).thenReturn(mockShardContext.getMapperService()); + when(fetchContext.mapperService()).thenReturn(mapperService); when(fetchContext.getQueryShardContext()).thenReturn(mockShardContext); when(fetchContext.getIndexSettings()).thenReturn(indexService.getIndexSettings()); when(fetchContext.searcher()).thenReturn( new ContextIndexSearcher( - searcher.getIndexReader(), - searcher.getSimilarity(), - searcher.getQueryCache(), - searcher.getQueryCachingPolicy(), + reader, + IndexSearcher.getDefaultSimilarity(), + 
IndexSearcher.getDefaultQueryCache(), + IndexSearcher.getDefaultQueryCachingPolicy(), true, - searcher.getExecutor(), + null, null ) ); From 98009ed9b83d43377e6b3ffa610db7154ff7d200 Mon Sep 17 00:00:00 2001 From: Srikanth Padakanti Date: Mon, 22 Apr 2024 03:02:30 -0500 Subject: [PATCH 15/37] Improve error messages for `_stats` with closed indices (#13012) * Improve the error messages for _stats with closed indices Signed-off-by: srikanth padakanti * Edit the changelog text Signed-off-by: srikanth padakanti * correct the test cases Signed-off-by: srikanth padakanti * fix to throw appropriate error message Signed-off-by: srikanth padakanti * fix to throw appropriate error message Signed-off-by: srikanth padakanti --------- Signed-off-by: srikanth padakanti Signed-off-by: Srikanth Padakanti --- CHANGELOG.md | 1 + .../index/rankeval/RankEvalRequestIT.java | 2 +- .../metadata/IndexNameExpressionResolver.java | 8 +++++++- .../IndexNameExpressionResolverTests.java | 19 +++++++++++++++++++ 4 files changed, 28 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index dadfcbfbd8b05..0594cfea2c89d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -60,6 +60,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Enabled mockTelemetryPlugin for IT and fixed OOM issues ([#13054](https://github.com/opensearch-project/OpenSearch/pull/13054)) - Fix implement mark() and markSupported() in class FilterStreamInput ([#13098](https://github.com/opensearch-project/OpenSearch/pull/13098)) - Fix snapshot _status API to return correct status for partial snapshots ([#12812](https://github.com/opensearch-project/OpenSearch/pull/12812)) +- Improve the error messages for _stats with closed indices ([#13012](https://github.com/opensearch-project/OpenSearch/pull/13012)) - Ignore BaseRestHandler unconsumed content check as it's always consumed. 
([#13290](https://github.com/opensearch-project/OpenSearch/pull/13290)) ### Security diff --git a/modules/rank-eval/src/internalClusterTest/java/org/opensearch/index/rankeval/RankEvalRequestIT.java b/modules/rank-eval/src/internalClusterTest/java/org/opensearch/index/rankeval/RankEvalRequestIT.java index 488c2e33648e7..0e3db9d1c78b3 100644 --- a/modules/rank-eval/src/internalClusterTest/java/org/opensearch/index/rankeval/RankEvalRequestIT.java +++ b/modules/rank-eval/src/internalClusterTest/java/org/opensearch/index/rankeval/RankEvalRequestIT.java @@ -345,7 +345,7 @@ public void testIndicesOptions() { request.indicesOptions(IndicesOptions.fromParameters("closed", null, null, "false", SearchRequest.DEFAULT_INDICES_OPTIONS)); response = client().execute(RankEvalAction.INSTANCE, request).actionGet(); assertEquals(1, response.getFailures().size()); - assertThat(response.getFailures().get("amsterdam_query"), instanceOf(IndexClosedException.class)); + assertThat(response.getFailures().get("amsterdam_query"), instanceOf(IllegalArgumentException.class)); // test allow_no_indices request = new RankEvalRequest(task, new String[] { "bad*" }); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexNameExpressionResolver.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexNameExpressionResolver.java index 9a3b569a7ac3d..24ff83d638d4b 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexNameExpressionResolver.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexNameExpressionResolver.java @@ -380,7 +380,13 @@ private void checkSystemIndexAccess(Context context, Metadata metadata, Set Date: Mon, 22 Apr 2024 18:07:57 +0530 Subject: [PATCH 16/37] [Remote Store] Throw IOException when remote is not in sync (#13282) Signed-off-by: Gaurav Bafna --- .../indices/create/RemoteCloneIndexIT.java | 26 +++++++--- .../RemotePrimaryRelocationIT.java | 51 +++++++++---------- .../opensearch/index/shard/IndexShard.java | 12 +++-- .../opensearch/index/shard/StoreRecovery.java | 15 ------ 4 files changed, 52 insertions(+), 52 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java index f50e8fd0a38cf..98c2a3a1581b8 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java @@ -40,13 +40,17 @@ */ import org.opensearch.Version; +import org.opensearch.action.admin.cluster.health.ClusterHealthRequest; +import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.action.admin.indices.settings.get.GetSettingsResponse; import org.opensearch.action.admin.indices.shrink.ResizeType; import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; +import org.opensearch.client.Requests; import org.opensearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.opensearch.common.settings.Settings; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.query.TermsQueryBuilder; +import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; import org.opensearch.test.VersionUtils; @@ -156,7 +160,11 @@ public void testCreateCloneIndexFailure() throws ExecutionException, Interrupted 
client().admin() .cluster() .prepareUpdateSettings() - .setTransientSettings(Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none")) + .setTransientSettings( + Settings.builder() + .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none") + .put(RecoverySettings.INDICES_INTERNAL_REMOTE_UPLOAD_TIMEOUT.getKey(), "10s") + ) .get(); try { setFailRate(REPOSITORY_NAME, 100); @@ -168,9 +176,14 @@ public void testCreateCloneIndexFailure() throws ExecutionException, Interrupted .setWaitForActiveShards(0) .setSettings(Settings.builder().put("index.number_of_replicas", 0).putNull("index.blocks.write").build()) .get(); - - Thread.sleep(2000); - ensureYellow("target"); + // waiting more than waitForRemoteStoreSync's sleep time of 30 sec to deterministically fail + Thread.sleep(40000); + ensureRed("target"); + ClusterHealthRequest healthRequest = Requests.clusterHealthRequest() + .waitForNoRelocatingShards(true) + .waitForNoInitializingShards(true); + ClusterHealthResponse actionGet = client().admin().cluster().health(healthRequest).actionGet(); + assertEquals(actionGet.getUnassignedShards(), numPrimaryShards); } catch (ExecutionException | InterruptedException e) { throw new RuntimeException(e); @@ -182,11 +195,12 @@ public void testCreateCloneIndexFailure() throws ExecutionException, Interrupted .cluster() .prepareUpdateSettings() .setTransientSettings( - Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), (String) null) + Settings.builder() + .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), (String) null) + .put(RecoverySettings.INDICES_INTERNAL_REMOTE_UPLOAD_TIMEOUT.getKey(), (String) null) ) .get(); } - } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemotePrimaryRelocationIT.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemotePrimaryRelocationIT.java index b1c429a45a1a1..4a4057def4207 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemotePrimaryRelocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemotePrimaryRelocationIT.java @@ -8,9 +8,8 @@ package org.opensearch.remotemigration; -import com.carrotsearch.randomizedtesting.generators.RandomNumbers; - import org.opensearch.action.DocWriteResponse; +import org.opensearch.action.admin.cluster.health.ClusterHealthRequest; import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; @@ -18,11 +17,13 @@ import org.opensearch.action.delete.DeleteResponse; import org.opensearch.action.index.IndexResponse; import org.opensearch.client.Client; +import org.opensearch.client.Requests; import org.opensearch.cluster.routing.allocation.command.MoveAllocationCommand; import org.opensearch.common.Priority; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.index.query.QueryBuilders; +import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.hamcrest.OpenSearchAssertions; @@ -162,12 +163,12 @@ public void testMixedModeRelocation_RemoteSeedingFail() throws Exception { String remoteNode = internalCluster().startNode(); 
internalCluster().validateClusterFormed(); - // assert repo gets registered - GetRepositoriesRequest gr = new GetRepositoriesRequest(new String[] { REPOSITORY_NAME }); - GetRepositoriesResponse getRepositoriesResponse = client.admin().cluster().getRepositories(gr).actionGet(); - assertEquals(1, getRepositoriesResponse.repositories().size()); - setFailRate(REPOSITORY_NAME, 100); + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(RecoverySettings.INDICES_INTERNAL_REMOTE_UPLOAD_TIMEOUT.getKey(), "10s")) + .get(); logger.info("--> relocating from {} to {} ", docRepNode, remoteNode); client().admin().cluster().prepareReroute().add(new MoveAllocationCommand("test", 0, docRepNode, remoteNode)).execute().actionGet(); @@ -181,29 +182,23 @@ public void testMixedModeRelocation_RemoteSeedingFail() throws Exception { .actionGet(); assertTrue(clusterHealthResponse.getRelocatingShards() == 1); - setFailRate(REPOSITORY_NAME, 0); - Thread.sleep(RandomNumbers.randomIntBetween(random(), 0, 2000)); - clusterHealthResponse = client().admin() - .cluster() - .prepareHealth() - .setTimeout(TimeValue.timeValueSeconds(45)) - .setWaitForEvents(Priority.LANGUID) - .setWaitForNoRelocatingShards(true) - .execute() - .actionGet(); - assertTrue(clusterHealthResponse.getRelocatingShards() == 0); - logger.info("--> remote to remote relocation complete"); + // waiting more than waitForRemoteStoreSync's sleep time of 30 sec to deterministically fail + Thread.sleep(40000); + + ClusterHealthRequest healthRequest = Requests.clusterHealthRequest() + .waitForNoRelocatingShards(true) + .waitForNoInitializingShards(true); + ClusterHealthResponse actionGet = client().admin().cluster().health(healthRequest).actionGet(); + assertEquals(actionGet.getRelocatingShards(), 0); + assertEquals(docRepNode, primaryNodeName("test")); + finished.set(true); indexingThread.join(); - refresh("test"); - OpenSearchAssertions.assertHitCount(client().prepareSearch("test").setTrackTotalHits(true).get(), numAutoGenDocs.get()); - OpenSearchAssertions.assertHitCount( - client().prepareSearch("test") - .setTrackTotalHits(true)// extra paranoia ;) - .setQuery(QueryBuilders.termQuery("auto", true)) - .get(), - numAutoGenDocs.get() - ); + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(RecoverySettings.INDICES_INTERNAL_REMOTE_UPLOAD_TIMEOUT.getKey(), (String) null)) + .get(); } private static Thread getIndexingThread(AtomicBoolean finished, AtomicInteger numAutoGenDocs) { diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 26dbbbcdee7c0..72a67096fcd9e 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -2118,15 +2118,16 @@ public boolean isRemoteSegmentStoreInSync() { return false; } - public void waitForRemoteStoreSync() { + public void waitForRemoteStoreSync() throws IOException { waitForRemoteStoreSync(() -> {}); } /* Blocks the calling thread, waiting for the remote store to get synced till internal Remote Upload Timeout Calls onProgress on seeing an increased file count on remote + Throws IOException if the remote store is not synced within the timeout */ - public void waitForRemoteStoreSync(Runnable onProgress) { + public void waitForRemoteStoreSync(Runnable onProgress) throws IOException { assert indexSettings.isAssignedOnRemoteNode(); 
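The loop body that follows now has two exits: an early return once isRemoteSegmentStoreInSync() reports success, and the new IOException once the timeout elapses. On the caller side that turns the old "wait, then re-check" dance into a plain try/catch; a hedged sketch of the resulting pattern, where the wrapping exception is illustrative and mirrors what recoverFromLocalShards already does in its existing catch block:

```java
// Caller-side sketch: a timeout now surfaces as an IOException from the wait itself,
// so no separate isRemoteSegmentStoreInSync() re-check is needed afterwards.
try {
    indexShard.waitForRemoteStoreSync();
} catch (IOException e) {
    // recovery treats an unsynced remote store as a hard failure
    throw new IndexShardRecoveryException(indexShard.shardId(), "failed to upload to remote", e);
}
```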
RemoteSegmentStoreDirectory directory = getRemoteDirectory(); int segmentUploadeCount = 0; @@ -2138,7 +2139,7 @@ public void waitForRemoteStoreSync(Runnable onProgress) { while (System.nanoTime() - startNanos < getRecoverySettings().internalRemoteUploadTimeout().nanos()) { try { if (isRemoteSegmentStoreInSync()) { - break; + return; } else { if (directory.getSegmentsUploadedToRemoteStore().size() > segmentUploadeCount) { onProgress.run(); @@ -2156,6 +2157,11 @@ public void waitForRemoteStoreSync(Runnable onProgress) { return; } } + throw new IOException( + "Failed to upload to remote segment store within remote upload timeout of " + + getRecoverySettings().internalRemoteUploadTimeout().getMinutes() + + " minutes" + ); } public void preRecovery() { diff --git a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java index f5e342d28fde1..e130f50e105d5 100644 --- a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java @@ -193,13 +193,6 @@ void recoverFromLocalShards( indexShard.getEngine().forceMerge(false, -1, false, false, false, UUIDs.randomBase64UUID()); if (indexShard.isRemoteTranslogEnabled() && indexShard.shardRouting.primary()) { indexShard.waitForRemoteStoreSync(); - if (indexShard.isRemoteSegmentStoreInSync() == false) { - throw new IndexShardRecoveryException( - indexShard.shardId(), - "failed to upload to remote", - new IOException("Failed to upload to remote segment store") - ); - } } return true; } catch (IOException ex) { @@ -436,10 +429,6 @@ void recoverFromSnapshotAndRemoteStore( indexShard.finalizeRecovery(); if (indexShard.isRemoteTranslogEnabled() && indexShard.shardRouting.primary()) { indexShard.waitForRemoteStoreSync(); - if (indexShard.isRemoteSegmentStoreInSync() == false) { - listener.onFailure(new IndexShardRestoreFailedException(shardId, "Failed to upload to remote segment store")); - return; - } } indexShard.postRecovery("restore done"); @@ -722,10 +711,6 @@ private void restore( indexShard.finalizeRecovery(); if (indexShard.isRemoteTranslogEnabled() && indexShard.shardRouting.primary()) { indexShard.waitForRemoteStoreSync(); - if (indexShard.isRemoteSegmentStoreInSync() == false) { - listener.onFailure(new IndexShardRestoreFailedException(shardId, "Failed to upload to remote segment store")); - return; - } } indexShard.postRecovery("restore done"); listener.onResponse(true); From 4e58cef69c00aa255c569bf23dc1ee552f208a64 Mon Sep 17 00:00:00 2001 From: Ankit Jain Date: Mon, 22 Apr 2024 18:31:11 +0530 Subject: [PATCH 17/37] Ignoring unavailable shards during search request execution with ignore_available parameter (#13298) Signed-off-by: Ankit Jain --- CHANGELOG.md | 1 + .../search/AbstractSearchAsyncAction.java | 6 ++++-- .../AbstractSearchAsyncActionTests.java | 19 +++++++++++++++++++ 3 files changed, 24 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0594cfea2c89d..98a696c62cfa5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -44,6 +44,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [BWC and API enforcement] Enforcing the presence of API annotations at build time ([#12872](https://github.com/opensearch-project/OpenSearch/pull/12872)) - Improve built-in secure transports support ([#12907](https://github.com/opensearch-project/OpenSearch/pull/12907)) - Update links to documentation in rest-api-spec 
([#13043](https://github.com/opensearch-project/OpenSearch/pull/13043)) +- Ignoring unavailable shards during search request execution with ignore_available parameter ([#13298](https://github.com/opensearch-project/OpenSearch/pull/13298)) - Refactoring globMatch using simpleMatchWithNormalizedStrings from Regex ([#13104](https://github.com/opensearch-project/OpenSearch/pull/13104)) - [BWC and API enforcement] Reconsider the breaking changes check policy to detect breaking changes against released versions ([#13292](https://github.com/opensearch-project/OpenSearch/pull/13292)) diff --git a/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java index 0520a4a7aecec..9bf4a4b1e18f1 100644 --- a/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java @@ -425,8 +425,10 @@ public final void executeNextPhase(SearchPhase currentPhase, SearchPhase nextPha currentPhase.getName() ); } - onPhaseFailure(currentPhase, "Partial shards failure (" + discrepancy + " shards unavailable)", null); - return; + if (!request.indicesOptions().ignoreUnavailable()) { + onPhaseFailure(currentPhase, "Partial shards failure (" + discrepancy + " shards unavailable)", null); + return; + } } } if (logger.isTraceEnabled()) { diff --git a/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java b/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java index 420289d3ff2e5..7dcbf213d6c9d 100644 --- a/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java @@ -70,6 +70,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.EnumSet; import java.util.HashSet; import java.util.List; import java.util.Set; @@ -442,6 +443,24 @@ public void testShardNotAvailableWithDisallowPartialFailures() { assertEquals(0, searchPhaseExecutionException.getSuppressed().length); } + public void testShardNotAvailableWithIgnoreUnavailable() { + SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(false) + .indicesOptions(new IndicesOptions(EnumSet.of(IndicesOptions.Option.IGNORE_UNAVAILABLE), IndicesOptions.WildcardStates.NONE)); + AtomicReference exception = new AtomicReference<>(); + ActionListener listener = ActionListener.wrap(response -> {}, exception::set); + int numShards = randomIntBetween(2, 10); + ArraySearchPhaseResults phaseResults = new ArraySearchPhaseResults<>(numShards); + AbstractSearchAsyncAction action = createAction(searchRequest, phaseResults, listener, false, new AtomicLong()); + // skip one to avoid the "all shards failed" failure. 
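The skipped iterator below is how the unit test manufactures a shard-count discrepancy; at the request level, the equivalent opt-in is simply building the search with ignore_unavailable. A rough client-side sketch, with the index pattern and remaining option flags purely illustrative:

```java
// Hypothetical usage: with ignoreUnavailable=true, executeNextPhase logs the
// shard discrepancy but proceeds to the next phase instead of failing it.
SearchRequest request = new SearchRequest("logs-*")
    .indicesOptions(IndicesOptions.fromOptions(
        true,   // ignoreUnavailable
        true,   // allowNoIndices
        true,   // expand wildcards to open indices
        false   // do not expand to closed indices
    ));
```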
+ SearchShardIterator skipIterator = new SearchShardIterator(null, null, Collections.emptyList(), null); + skipIterator.resetAndSkip(); + action.skipShard(skipIterator); + + // Validate no exception is thrown + action.executeNextPhase(action, createFetchSearchPhase()); + action.sendSearchResponse(InternalSearchResponse.empty(), phaseResults.results); + } + private static ArraySearchPhaseResults phaseResults( Set contextIds, List> nodeLookups, From 5bc065b191cf6d20b78218ae57af3b389903b4ea Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 Apr 2024 09:22:44 -0700 Subject: [PATCH 18/37] Bump com.gradle.enterprise from 3.17.1 to 3.17.2 (#13327) * Bump com.gradle.enterprise from 3.17.1 to 3.17.2 Bumps com.gradle.enterprise from 3.17.1 to 3.17.2. --- updated-dependencies: - dependency-name: com.gradle.enterprise dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 2 +- settings.gradle | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 98a696c62cfa5..2bb98c67256e0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -34,7 +34,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `org.apache.commons:commonscodec` from 1.15 to 1.16.1 ([#12627](https://github.com/opensearch-project/OpenSearch/pull/12627)) - Bump `org.apache.commons:commonslang` from 3.13.0 to 3.14.0 ([#12627](https://github.com/opensearch-project/OpenSearch/pull/12627)) - Bump Apache Tika from 2.6.0 to 2.9.2 ([#12627](https://github.com/opensearch-project/OpenSearch/pull/12627)) -- Bump `com.gradle.enterprise` from 3.16.2 to 3.17.1 ([#13116](https://github.com/opensearch-project/OpenSearch/pull/13116), [#13191](https://github.com/opensearch-project/OpenSearch/pull/13191)) +- Bump `com.gradle.enterprise` from 3.16.2 to 3.17.2 ([#13116](https://github.com/opensearch-project/OpenSearch/pull/13116), [#13191](https://github.com/opensearch-project/OpenSearch/pull/13191), [#13327](https://github.com/opensearch-project/OpenSearch/pull/13327)) - Bump `gradle/wrapper-validation-action` from 2 to 3 ([#13192](https://github.com/opensearch-project/OpenSearch/pull/13192)) - Bump joda from 2.12.2 to 2.12.7 ([#13193](https://github.com/opensearch-project/OpenSearch/pull/13193)) - Bump bouncycastle from 1.77 to 1.78 ([#13243](https://github.com/opensearch-project/OpenSearch/pull/13243)) diff --git a/settings.gradle b/settings.gradle index e7d3dc56b67b9..8e961b9d4179f 100644 --- a/settings.gradle +++ b/settings.gradle @@ -10,7 +10,7 @@ */ plugins { - id "com.gradle.enterprise" version "3.17.1" + id "com.gradle.enterprise" version "3.17.2" } ext.disableBuildCache = hasProperty('DISABLE_BUILD_CACHE') || System.getenv().containsKey('DISABLE_BUILD_CACHE') From cf58fabf203ade0e87b29cb7c7c8f5b133311ec3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 Apr 2024 09:25:22 -0700 Subject: [PATCH 19/37] Bump com.google.apis:google-api-services-compute from v1-rev235-1.25.0 to v1-rev20240407-2.0.0 in /plugins/discovery-gce (#13333) * Bump com.google.apis:google-api-services-compute Bumps com.google.apis:google-api-services-compute from v1-rev235-1.25.0 to 
v1-rev20240407-2.0.0. --- updated-dependencies: - dependency-name: com.google.apis:google-api-services-compute dependency-type: direct:production ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 1 + plugins/discovery-gce/build.gradle | 2 +- .../google-api-services-compute-v1-rev20240407-2.0.0.jar.sha1 | 1 + .../google-api-services-compute-v1-rev235-1.25.0.jar.sha1 | 1 - 4 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 plugins/discovery-gce/licenses/google-api-services-compute-v1-rev20240407-2.0.0.jar.sha1 delete mode 100644 plugins/discovery-gce/licenses/google-api-services-compute-v1-rev235-1.25.0.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 2bb98c67256e0..3e8184cb29a4d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -39,6 +39,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump joda from 2.12.2 to 2.12.7 ([#13193](https://github.com/opensearch-project/OpenSearch/pull/13193)) - Bump bouncycastle from 1.77 to 1.78 ([#13243](https://github.com/opensearch-project/OpenSearch/pull/13243)) - Update google dependencies in repository-gcs and discovery-gce ([#13213](https://github.com/opensearch-project/OpenSearch/pull/13213)) +- Bump `com.google.apis:google-api-services-compute` from v1-rev235-1.25.0 to v1-rev20240407-2.0.0 ([#13333](https://github.com/opensearch-project/OpenSearch/pull/13333)) ### Changed - [BWC and API enforcement] Enforcing the presence of API annotations at build time ([#12872](https://github.com/opensearch-project/OpenSearch/pull/12872)) diff --git a/plugins/discovery-gce/build.gradle b/plugins/discovery-gce/build.gradle index 92cdda59d1c99..80aae03bc0332 100644 --- a/plugins/discovery-gce/build.gradle +++ b/plugins/discovery-gce/build.gradle @@ -18,7 +18,7 @@ opensearchplugin { } dependencies { - api "com.google.apis:google-api-services-compute:v1-rev235-1.25.0" + api "com.google.apis:google-api-services-compute:v1-rev20240407-2.0.0" api "com.google.api-client:google-api-client:1.35.2" api "com.google.oauth-client:google-oauth-client:1.35.0" api "com.google.http-client:google-http-client:${versions.google_http_client}" diff --git a/plugins/discovery-gce/licenses/google-api-services-compute-v1-rev20240407-2.0.0.jar.sha1 b/plugins/discovery-gce/licenses/google-api-services-compute-v1-rev20240407-2.0.0.jar.sha1 new file mode 100644 index 0000000000000..834d718641a51 --- /dev/null +++ b/plugins/discovery-gce/licenses/google-api-services-compute-v1-rev20240407-2.0.0.jar.sha1 @@ -0,0 +1 @@ +edf93bc92c9b87fee51aa6c3545b565e58075c05 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/google-api-services-compute-v1-rev235-1.25.0.jar.sha1 b/plugins/discovery-gce/licenses/google-api-services-compute-v1-rev235-1.25.0.jar.sha1 deleted file mode 100644 index f79af846281de..0000000000000 --- a/plugins/discovery-gce/licenses/google-api-services-compute-v1-rev235-1.25.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -67bf1ac84286b4f9ea996a90f6e91e36dc648aff \ No newline at end of file From c9f1565939c8328c23f235aa547ab55d0085af73 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 Apr 2024 11:52:18 -0700 Subject: [PATCH 20/37] Bump commons-cli:commons-cli from 1.6.0 to 1.7.0 in 
/plugins/repository-hdfs (#13331) * Bump commons-cli:commons-cli in /plugins/repository-hdfs Bumps commons-cli:commons-cli from 1.6.0 to 1.7.0. --- updated-dependencies: - dependency-name: commons-cli:commons-cli dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 1 + plugins/repository-hdfs/build.gradle | 2 +- plugins/repository-hdfs/licenses/commons-cli-1.6.0.jar.sha1 | 1 - plugins/repository-hdfs/licenses/commons-cli-1.7.0.jar.sha1 | 1 + 4 files changed, 3 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-hdfs/licenses/commons-cli-1.6.0.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/commons-cli-1.7.0.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 3e8184cb29a4d..b5db1a9084572 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -40,6 +40,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump bouncycastle from 1.77 to 1.78 ([#13243](https://github.com/opensearch-project/OpenSearch/pull/13243)) - Update google dependencies in repository-gcs and discovery-gce ([#13213](https://github.com/opensearch-project/OpenSearch/pull/13213)) - Bump `com.google.apis:google-api-services-compute` from v1-rev235-1.25.0 to v1-rev20240407-2.0.0 ([#13333](https://github.com/opensearch-project/OpenSearch/pull/13333)) +- Bump `commons-cli:commons-cli` from 1.6.0 to 1.7.0 ([#13331](https://github.com/opensearch-project/OpenSearch/pull/13331)) ### Changed - [BWC and API enforcement] Enforcing the presence of API annotations at build time ([#12872](https://github.com/opensearch-project/OpenSearch/pull/12872)) diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index cd7175e70e607..e019a878dfcf0 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -70,7 +70,7 @@ dependencies { api 'com.google.code.gson:gson:2.10.1' runtimeOnly "com.google.guava:guava:${versions.guava}" api "commons-logging:commons-logging:${versions.commonslogging}" - api 'commons-cli:commons-cli:1.6.0' + api 'commons-cli:commons-cli:1.7.0' api "commons-codec:commons-codec:${versions.commonscodec}" api 'commons-collections:commons-collections:3.2.2' api "org.apache.commons:commons-compress:${versions.commonscompress}" diff --git a/plugins/repository-hdfs/licenses/commons-cli-1.6.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-cli-1.6.0.jar.sha1 deleted file mode 100644 index bb94eda6814ea..0000000000000 --- a/plugins/repository-hdfs/licenses/commons-cli-1.6.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -38166a23afb5bd5520f739b87b3be87f7f0fb96d \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-cli-1.7.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-cli-1.7.0.jar.sha1 new file mode 100644 index 0000000000000..759bc9275d346 --- /dev/null +++ b/plugins/repository-hdfs/licenses/commons-cli-1.7.0.jar.sha1 @@ -0,0 +1 @@ +6504b3f17e8bc5adc6b6c8deecc90144d0154075 \ No newline at end of file From efa06feeabd336e3d95764d072b116250c01d8f5 Mon Sep 17 00:00:00 2001 From: Lakshya Taragi <157457166+ltaragi@users.noreply.github.com> Date: Tue, 23 Apr 2024 15:59:52 +0530 Subject: [PATCH 21/37] Prefer remote replicas for primary promotion during 
migration (#13136) Signed-off-by: Lakshya Taragi --- .../RemoteDualReplicationIT.java | 135 +++++++++++++++++ .../cluster/routing/RoutingNodes.java | 32 +++- .../remotestore/RemoteStoreNodeService.java | 14 ++ .../allocation/FailedShardsRoutingTests.java | 140 ++++++++++++++++++ 4 files changed, 316 insertions(+), 5 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteDualReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteDualReplicationIT.java index e316bae5d8ebc..18f07910403d4 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteDualReplicationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteDualReplicationIT.java @@ -439,6 +439,141 @@ public void testFailoverRemotePrimaryToDocrepReplica() throws Exception { ); } + /* + Scenario: + - Starts 1 docrep backed data node + - Creates an index with 0 replica + - Starts 1 remote backed data node + - Moves primary copy from docrep to remote through _cluster/reroute + - Starts 1 more remote backed data node + - Expands index to 2 replicas, one each on new remote node and docrep node + - Stops remote enabled node hosting the primary + - Ensures remote replica gets promoted to primary + - Ensures doc count is same after failover + - Indexes some more docs to ensure working of failed-over primary + */ + public void testFailoverRemotePrimaryToRemoteReplica() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + + logger.info("---> Starting 1 docrep data node"); + String docrepNodeName = internalCluster().startDataOnlyNode(); + internalCluster().validateClusterFormed(); + assertEquals(internalCluster().client().admin().cluster().prepareGetRepositories().get().repositories().size(), 0); + + logger.info("---> Creating index with 0 replica"); + createIndex(FAILOVER_REMOTE_TO_REMOTE, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build()); + ensureGreen(FAILOVER_REMOTE_TO_REMOTE); + initDocRepToRemoteMigration(); + + logger.info("---> Starting 1 remote enabled data node"); + addRemote = true; + String remoteNodeName1 = internalCluster().startDataOnlyNode(); + internalCluster().validateClusterFormed(); + assertEquals( + internalCluster().client() + .admin() + .cluster() + .prepareGetRepositories(REPOSITORY_NAME, REPOSITORY_2_NAME) + .get() + .repositories() + .size(), + 2 + ); + + logger.info("---> Starting doc ingestion in parallel thread"); + AsyncIndexingService asyncIndexingService = new AsyncIndexingService(FAILOVER_REMOTE_TO_REMOTE); + asyncIndexingService.startIndexing(); + + logger.info("---> Moving primary copy from docrep node {} to remote enabled node {}", docrepNodeName, remoteNodeName1); + assertAcked( + internalCluster().client() + .admin() + .cluster() + .prepareReroute() + .add(new MoveAllocationCommand(FAILOVER_REMOTE_TO_REMOTE, 0, docrepNodeName, remoteNodeName1)) + .get() + ); + ensureGreen(FAILOVER_REMOTE_TO_REMOTE); + assertEquals(primaryNodeName(FAILOVER_REMOTE_TO_REMOTE), remoteNodeName1); + + logger.info("---> Starting 1 more remote enabled data node"); + String remoteNodeName2 = internalCluster().startDataOnlyNode(); + internalCluster().validateClusterFormed(); + + logger.info("---> Expanding index to 2 replica copies, on docrepNode and remoteNode2"); + assertAcked( + internalCluster().client() + .admin() + .indices() + .prepareUpdateSettings() + .setIndices(FAILOVER_REMOTE_TO_REMOTE) + 
.setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 2).build()) + .get() + ); + ensureGreen(FAILOVER_REMOTE_TO_REMOTE); + + logger.info("---> Stopping indexing thread"); + asyncIndexingService.stopIndexing(); + + refreshAndWaitForReplication(FAILOVER_REMOTE_TO_REMOTE); + Map shardStatsMap = internalCluster().client() + .admin() + .indices() + .prepareStats(FAILOVER_REMOTE_TO_REMOTE) + .setDocs(true) + .get() + .asMap(); + DiscoveryNodes nodes = internalCluster().client().admin().cluster().prepareState().get().getState().getNodes(); + long initialPrimaryDocCount = 0; + for (ShardRouting shardRouting : shardStatsMap.keySet()) { + if (shardRouting.primary()) { + assertTrue(nodes.get(shardRouting.currentNodeId()).isRemoteStoreNode()); + initialPrimaryDocCount = shardStatsMap.get(shardRouting).getStats().getDocs().getCount(); + } + } + int firstBatch = (int) asyncIndexingService.getIndexedDocs(); + assertReplicaAndPrimaryConsistency(FAILOVER_REMOTE_TO_REMOTE, firstBatch, 0); + + logger.info("---> Stop remote store enabled node hosting the primary"); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(remoteNodeName1)); + ensureStableCluster(3); + ensureYellow(FAILOVER_REMOTE_TO_REMOTE); + DiscoveryNodes finalNodes = internalCluster().client().admin().cluster().prepareState().get().getState().getNodes(); + + waitUntil(() -> { + ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); + String nodeId = clusterState.getRoutingTable().index(FAILOVER_REMOTE_TO_REMOTE).shard(0).primaryShard().currentNodeId(); + if (nodeId == null) { + return false; + } else { + assertEquals(finalNodes.get(nodeId).getName(), remoteNodeName2); + return finalNodes.get(nodeId).isRemoteStoreNode(); + } + }); + + shardStatsMap = internalCluster().client().admin().indices().prepareStats(FAILOVER_REMOTE_TO_REMOTE).setDocs(true).get().asMap(); + long primaryDocCountAfterFailover = 0; + for (ShardRouting shardRouting : shardStatsMap.keySet()) { + if (shardRouting.primary()) { + assertTrue(finalNodes.get(shardRouting.currentNodeId()).isRemoteStoreNode()); + primaryDocCountAfterFailover = shardStatsMap.get(shardRouting).getStats().getDocs().getCount(); + } + } + assertEquals(initialPrimaryDocCount, primaryDocCountAfterFailover); + + logger.info("---> Index some more docs to ensure that the failed over primary is ingesting new docs"); + int secondBatch = randomIntBetween(1, 10); + logger.info("---> Indexing {} more docs", secondBatch); + indexBulk(FAILOVER_REMOTE_TO_REMOTE, secondBatch); + refreshAndWaitForReplication(FAILOVER_REMOTE_TO_REMOTE); + + shardStatsMap = internalCluster().client().admin().indices().prepareStats(FAILOVER_REMOTE_TO_REMOTE).setDocs(true).get().asMap(); + assertEquals(2, shardStatsMap.size()); + shardStatsMap.forEach( + (shardRouting, shardStats) -> { assertEquals(firstBatch + secondBatch, shardStats.getStats().getDocs().getCount()); } + ); + } + /* Scenario: - Starts 1 docrep backed data node diff --git a/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java b/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java index 938a603c459c9..ab455f52c4195 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java +++ b/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java @@ -67,6 +67,8 @@ import java.util.stream.Collectors; import java.util.stream.Stream; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.isMigratingToRemoteStore; + /** * {@link 
RoutingNodes} represents a copy the routing information contained in the {@link ClusterState cluster state}. * It can be either initialized as mutable or immutable (see {@link #RoutingNodes(ClusterState, boolean)}), allowing @@ -418,6 +420,20 @@ public ShardRouting activeReplicaWithOldestVersion(ShardId shardId) { .orElse(null); } + /** + * Returns one active replica shard on a remote node for the given shard id or null if + * no such replica is found. + *
<p>
+ * Since we aim to continue moving forward during remote store migration, replicas already migrated to remote nodes + * are preferred for primary promotion + */ + public ShardRouting activeReplicaOnRemoteNode(ShardId shardId) { + return assignedShards(shardId).stream().filter(shr -> !shr.primary() && shr.active()).filter((shr) -> { + RoutingNode nd = node(shr.currentNodeId()); + return (nd != null && nd.node().isRemoteStoreNode()); + }).findFirst().orElse(null); + } + /** * Returns true iff all replicas are active for the given shard routing. Otherwise false */ @@ -735,11 +751,17 @@ private void unassignPrimaryAndPromoteActiveReplicaIfExists( RoutingChangesObserver routingChangesObserver ) { assert failedShard.primary(); - ShardRouting activeReplica; - if (metadata.isSegmentReplicationEnabled(failedShard.getIndexName())) { - activeReplica = activeReplicaWithOldestVersion(failedShard.shardId()); - } else { - activeReplica = activeReplicaWithHighestVersion(failedShard.shardId()); + ShardRouting activeReplica = null; + if (isMigratingToRemoteStore(metadata)) { + // we might not find any replica on remote node + activeReplica = activeReplicaOnRemoteNode(failedShard.shardId()); + } + if (activeReplica == null) { + if (metadata.isSegmentReplicationEnabled(failedShard.getIndexName())) { + activeReplica = activeReplicaWithOldestVersion(failedShard.shardId()); + } else { + activeReplica = activeReplicaWithHighestVersion(failedShard.shardId()); + } } if (activeReplica == null) { moveToUnassigned(failedShard, unassignedInfo); diff --git a/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeService.java b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeService.java index 94b11086ba865..adfb751421db7 100644 --- a/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeService.java +++ b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeService.java @@ -11,6 +11,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.metadata.RepositoriesMetadata; import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.node.DiscoveryNode; @@ -238,4 +239,17 @@ public static boolean isMigratingToRemoteStore(ClusterSettings clusterSettings) return (isMixedMode && isRemoteStoreMigrationDirection); } + + /** + * To check if the cluster is undergoing remote store migration using clusterState metadata + * @return + * true For REMOTE_STORE migration direction and MIXED compatibility mode, + * false otherwise + */ + public static boolean isMigratingToRemoteStore(Metadata metadata) { + boolean isMixedMode = REMOTE_STORE_COMPATIBILITY_MODE_SETTING.get(metadata.settings()).equals(CompatibilityMode.MIXED); + boolean isRemoteStoreMigrationDirection = MIGRATION_DIRECTION_SETTING.get(metadata.settings()).equals(Direction.REMOTE_STORE); + + return (isMixedMode && isRemoteStoreMigrationDirection); + } } diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/FailedShardsRoutingTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/FailedShardsRoutingTests.java index db4cedbbbe7b5..04e37e7d958d0 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/FailedShardsRoutingTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/FailedShardsRoutingTests.java @@ -40,6 +40,7 @@ import 
org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.routing.RoutingNodes; import org.opensearch.cluster.routing.RoutingTable; @@ -47,13 +48,18 @@ import org.opensearch.cluster.routing.allocation.command.AllocationCommands; import org.opensearch.cluster.routing.allocation.command.MoveAllocationCommand; import org.opensearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; +import org.opensearch.common.UUIDs; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.index.shard.ShardId; import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.node.remotestore.RemoteStoreNodeService; import org.opensearch.test.VersionUtils; import java.util.ArrayList; import java.util.HashSet; +import java.util.List; +import java.util.Map; import java.util.Set; import static org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING; @@ -61,6 +67,10 @@ import static org.opensearch.cluster.routing.ShardRoutingState.RELOCATING; import static org.opensearch.cluster.routing.ShardRoutingState.STARTED; import static org.opensearch.cluster.routing.ShardRoutingState.UNASSIGNED; +import static org.opensearch.common.util.FeatureFlags.REMOTE_STORE_MIGRATION_EXPERIMENTAL; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.lessThan; @@ -812,4 +822,134 @@ private void testReplicaIsPromoted(boolean isSegmentReplicationEnabled) { } } } + + public void testPreferReplicaOnRemoteNodeForPrimaryPromotion() { + FeatureFlags.initializeFeatureFlags(Settings.builder().put(REMOTE_STORE_MIGRATION_EXPERIMENTAL, "true").build()); + AllocationService allocation = createAllocationService(Settings.builder().build()); + + // segment replication enabled + Settings.Builder settingsBuilder = settings(Version.CURRENT).put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT); + + // remote store migration metadata settings + Metadata metadata = Metadata.builder() + .put(IndexMetadata.builder("test").settings(settingsBuilder).numberOfShards(1).numberOfReplicas(4)) + .persistentSettings( + Settings.builder() + .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), RemoteStoreNodeService.CompatibilityMode.MIXED.mode) + .put(MIGRATION_DIRECTION_SETTING.getKey(), RemoteStoreNodeService.Direction.REMOTE_STORE.direction) + .build() + ) + .build(); + + RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metadata.index("test")).build(); + + ClusterState clusterState = ClusterState.builder(CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metadata(metadata) + .routingTable(initialRoutingTable) + .build(); + + ShardId shardId = new ShardId(metadata.index("test").getIndex(), 0); + + // add a remote node and start primary shard + Map remoteStoreNodeAttributes = Map.of( + REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, + 
"REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_VALUE" + ); + DiscoveryNode remoteNode1 = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + remoteStoreNodeAttributes, + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(remoteNode1)).build(); + clusterState = ClusterState.builder(clusterState).routingTable(allocation.reroute(clusterState, "reroute").routingTable()).build(); + assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1)); + assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(4)); + + clusterState = startInitializingShardsAndReroute(allocation, clusterState); + assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(1)); + assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(4)); + + // add remote and non-remote nodes and start replica shards + DiscoveryNode remoteNode2 = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + remoteStoreNodeAttributes, + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + DiscoveryNode remoteNode3 = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + remoteStoreNodeAttributes, + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + DiscoveryNode nonRemoteNode1 = new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), Version.CURRENT); + DiscoveryNode nonRemoteNode2 = new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), Version.CURRENT); + List replicaShardNodes = List.of(remoteNode2, remoteNode3, nonRemoteNode1, nonRemoteNode2); + + for (int i = 0; i < 4; i++) { + clusterState = ClusterState.builder(clusterState) + .nodes(DiscoveryNodes.builder(clusterState.nodes()).add(replicaShardNodes.get(i))) + .build(); + + clusterState = allocation.reroute(clusterState, "reroute"); + assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(1 + i)); + assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1)); + assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(4 - (i + 1))); + + clusterState = startInitializingShardsAndReroute(allocation, clusterState); + assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(1 + (i + 1))); + assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(4 - (i + 1))); + } + + // fail primary shard + ShardRouting primaryShard0 = clusterState.routingTable().index("test").shard(0).primaryShard(); + ClusterState newState = allocation.applyFailedShard(clusterState, primaryShard0, randomBoolean()); + assertNotEquals(clusterState, newState); + clusterState = newState; + + // verify that promoted replica exists on a remote node + assertEquals(4, clusterState.getRoutingNodes().shardsWithState(STARTED).size()); + ShardRouting primaryShardRouting1 = clusterState.routingTable().index("test").shard(0).primaryShard(); + assertNotEquals(primaryShard0, primaryShardRouting1); + assertTrue( + primaryShardRouting1.currentNodeId().equals(remoteNode2.getId()) + || primaryShardRouting1.currentNodeId().equals(remoteNode3.getId()) + ); + + // fail primary shard again + newState = allocation.applyFailedShard(clusterState, primaryShardRouting1, randomBoolean()); + assertNotEquals(clusterState, newState); + clusterState = newState; + + // verify that promoted replica 
again exists on a remote node + assertEquals(3, clusterState.getRoutingNodes().shardsWithState(STARTED).size()); + ShardRouting primaryShardRouting2 = clusterState.routingTable().index("test").shard(0).primaryShard(); + assertNotEquals(primaryShardRouting1, primaryShardRouting2); + assertTrue( + primaryShardRouting2.currentNodeId().equals(remoteNode2.getId()) + || primaryShardRouting2.currentNodeId().equals(remoteNode3.getId()) + ); + assertNotEquals(primaryShardRouting1.currentNodeId(), primaryShardRouting2.currentNodeId()); + + ShardRouting expectedCandidateForSegRep = clusterState.getRoutingNodes().activeReplicaWithOldestVersion(shardId); + + // fail primary shard again + newState = allocation.applyFailedShard(clusterState, primaryShardRouting2, randomBoolean()); + assertNotEquals(clusterState, newState); + clusterState = newState; + + // verify that promoted replica exists on a non-remote node + assertEquals(2, clusterState.getRoutingNodes().shardsWithState(STARTED).size()); + ShardRouting primaryShardRouting3 = clusterState.routingTable().index("test").shard(0).primaryShard(); + assertNotEquals(primaryShardRouting2, primaryShardRouting3); + assertTrue( + primaryShardRouting3.currentNodeId().equals(nonRemoteNode1.getId()) + || primaryShardRouting3.currentNodeId().equals(nonRemoteNode2.getId()) + ); + assertEquals(expectedCandidateForSegRep.allocationId(), primaryShardRouting3.allocationId()); + } } From 2aad4998af0b20b28fd7dba009fdba4658bb130e Mon Sep 17 00:00:00 2001 From: Lakshya Taragi <157457166+ltaragi@users.noreply.github.com> Date: Tue, 23 Apr 2024 16:07:25 +0530 Subject: [PATCH 22/37] Prevent version upgrade during remote migration (#13185) Signed-off-by: Lakshya Taragi --- .../coordination/JoinTaskExecutor.java | 20 ++++++- .../coordination/JoinTaskExecutorTests.java | 59 +++++++++++++++++++ 2 files changed, 76 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java index 5d896e392e6bc..5475470b81b93 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java @@ -31,6 +31,7 @@ package org.opensearch.cluster.coordination; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.Version; import org.opensearch.cluster.ClusterState; @@ -57,6 +58,7 @@ import java.util.Collections; import java.util.HashMap; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Optional; import java.util.Set; @@ -66,6 +68,7 @@ import static org.opensearch.cluster.decommission.DecommissionHelper.nodeCommissioned; import static org.opensearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; import static org.opensearch.node.remotestore.RemoteStoreNodeService.CompatibilityMode; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.CompatibilityMode.MIXED; import static org.opensearch.node.remotestore.RemoteStoreNodeService.CompatibilityMode.STRICT; import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; @@ -78,7 +81,7 @@ public class JoinTaskExecutor implements ClusterStateTaskExecutor remoteDN = existingNodes.stream().filter(DiscoveryNode::isRemoteStoreNode).findFirst(); remoteDN.ifPresent(discoveryNode -> ensureRemoteStoreNodesCompatibility(joiningNode, discoveryNode)); diff --git 
a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java index 5eafe63e63fad..3e343e95f6c4b 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java @@ -76,6 +76,7 @@ import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING; import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; +import static org.opensearch.test.VersionUtils.allOpenSearchVersions; import static org.opensearch.test.VersionUtils.allVersions; import static org.opensearch.test.VersionUtils.maxCompatibleVersion; import static org.opensearch.test.VersionUtils.randomCompatibleVersion; @@ -885,6 +886,64 @@ public void testUpdatesClusterStateWithMultiNodeClusterAndSameRepository() throw validateRepositoryMetadata(result.resultingState, clusterManagerNode, 2); } + public void testNodeJoinInMixedMode() { + Settings nodeSettings = Settings.builder().put(REMOTE_STORE_MIGRATION_EXPERIMENTAL, "true").build(); + FeatureFlags.initializeFeatureFlags(nodeSettings); + + List versions = allOpenSearchVersions(); + assert versions.size() >= 2 : "test requires at least two open search versions"; + Version baseVersion = versions.get(versions.size() - 2); + Version higherVersion = versions.get(versions.size() - 1); + + DiscoveryNode currentNode1 = new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), baseVersion); + DiscoveryNode currentNode2 = new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), baseVersion); + DiscoveryNodes currentNodes = DiscoveryNodes.builder() + .add(currentNode1) + .localNodeId(currentNode1.getId()) + .add(currentNode2) + .localNodeId(currentNode2.getId()) + .build(); + + Settings mixedModeCompatibilitySettings = Settings.builder() + .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), RemoteStoreNodeService.CompatibilityMode.MIXED) + .build(); + + Metadata metadata = Metadata.builder().persistentSettings(mixedModeCompatibilitySettings).build(); + + // joining node of a higher version than the current nodes + DiscoveryNode joiningNode1 = new DiscoveryNode( + randomAlphaOfLength(10), + randomAlphaOfLength(10), + buildNewFakeTransportAddress(), + remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO), + Collections.singleton(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE), + higherVersion + ); + final IllegalStateException exception = expectThrows( + IllegalStateException.class, + () -> JoinTaskExecutor.ensureNodesCompatibility(joiningNode1, currentNodes, metadata) + ); + String reason = String.format( + Locale.ROOT, + "remote migration : a node [%s] of higher version [%s] is not allowed to join a cluster with maximum version [%s]", + joiningNode1, + joiningNode1.getVersion(), + currentNodes.getMaxNodeVersion() + ); + assertEquals(reason, exception.getMessage()); + + // joining node of the same version as the current nodes + DiscoveryNode joiningNode2 = new DiscoveryNode( + randomAlphaOfLength(10), + randomAlphaOfLength(10), + buildNewFakeTransportAddress(), + remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO), + Collections.singleton(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE), + baseVersion + ); + 
JoinTaskExecutor.ensureNodesCompatibility(joiningNode2, currentNodes, metadata); + } + private void validateRepositoryMetadata(ClusterState updatedState, DiscoveryNode existingNode, int expectedRepositories) throws Exception { From 41a30551d704a991f27ee9eabe5be8cd55910428 Mon Sep 17 00:00:00 2001 From: gaobinlong Date: Tue, 23 Apr 2024 19:35:15 +0800 Subject: [PATCH 23/37] Rename ingest processor supports overriding target field if exists (#12990) * Rename ingest processor supports overriding target field if exists Signed-off-by: Gao Binlong * Modify change log Signed-off-by: Gao Binlong --------- Signed-off-by: Gao Binlong --- CHANGELOG.md | 1 + .../ingest/common/RenameProcessor.java | 17 +++++-- .../common/DotExpanderProcessorTests.java | 1 + .../common/RenameProcessorFactoryTests.java | 13 +++++ .../ingest/common/RenameProcessorTests.java | 51 +++++++++++-------- .../test/ingest/280_rename_processor.yml | 39 ++++++++++++++ 6 files changed, 96 insertions(+), 26 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b5db1a9084572..e1184d48deb59 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Allow setting KEYSTORE_PASSWORD through env variable ([#12865](https://github.com/opensearch-project/OpenSearch/pull/12865)) - [Concurrent Segment Search] Perform buildAggregation concurrently and support Composite Aggregations ([#12697](https://github.com/opensearch-project/OpenSearch/pull/12697)) - [Concurrent Segment Search] Disable concurrent segment search for system indices and throttled requests ([#12954](https://github.com/opensearch-project/OpenSearch/pull/12954)) +- Rename ingest processor supports overriding target field if exists ([#12990](https://github.com/opensearch-project/OpenSearch/pull/12990)) - [Tiered Caching] Make took time caching policy setting dynamic ([#13063](https://github.com/opensearch-project/OpenSearch/pull/13063)) - Derived fields support to derive field values at query time without indexing ([#12569](https://github.com/opensearch-project/OpenSearch/pull/12569)) - Detect breaking changes on pull requests ([#9044](https://github.com/opensearch-project/OpenSearch/pull/9044)) diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/RenameProcessor.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/RenameProcessor.java index 7564bbdf95f45..6ec3ebb6ace81 100644 --- a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/RenameProcessor.java +++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/RenameProcessor.java @@ -52,18 +52,21 @@ public final class RenameProcessor extends AbstractProcessor { private final TemplateScript.Factory field; private final TemplateScript.Factory targetField; private final boolean ignoreMissing; + private final boolean overrideTarget; RenameProcessor( String tag, String description, TemplateScript.Factory field, TemplateScript.Factory targetField, - boolean ignoreMissing + boolean ignoreMissing, + boolean overrideTarget ) { super(tag, description); this.field = field; this.targetField = targetField; this.ignoreMissing = ignoreMissing; + this.overrideTarget = overrideTarget; } TemplateScript.Factory getField() { @@ -78,6 +81,10 @@ boolean isIgnoreMissing() { return ignoreMissing; } + boolean isOverrideTarget() { + return overrideTarget; + } + @Override public IngestDocument execute(IngestDocument document) { String path = document.renderTemplate(field); @@ -94,9 +101,10 @@ 
public IngestDocument execute(IngestDocument document) { // We fail here if the target field point to an array slot that is out of range. // If we didn't do this then we would fail if we set the value in the target_field // and then on failure processors would not see that value we tried to rename as we already - // removed it. + // removed it. If the target field is out of range, we throw the exception no matter + // what the parameter overrideTarget is. String target = document.renderTemplate(targetField); - if (document.hasField(target, true)) { + if (document.hasField(target, true) && !overrideTarget) { throw new IllegalArgumentException("field [" + target + "] already exists"); } @@ -143,7 +151,8 @@ public RenameProcessor create( scriptService ); boolean ignoreMissing = ConfigurationUtils.readBooleanProperty(TYPE, processorTag, config, "ignore_missing", false); - return new RenameProcessor(processorTag, description, fieldTemplate, targetFieldTemplate, ignoreMissing); + boolean overrideTarget = ConfigurationUtils.readBooleanProperty(TYPE, processorTag, config, "override_target", false); + return new RenameProcessor(processorTag, description, fieldTemplate, targetFieldTemplate, ignoreMissing, overrideTarget); } } } diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/DotExpanderProcessorTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/DotExpanderProcessorTests.java index cd912269a593d..73719b24c74ea 100644 --- a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/DotExpanderProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/DotExpanderProcessorTests.java @@ -105,6 +105,7 @@ public void testEscapeFields_valueField() throws Exception { null, new TestTemplateService.MockTemplateScript.Factory("foo"), new TestTemplateService.MockTemplateScript.Factory("foo.bar"), + false, false ); processor.execute(document); diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RenameProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RenameProcessorFactoryTests.java index ec43be97689ee..8ce9203db43ce 100644 --- a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RenameProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RenameProcessorFactoryTests.java @@ -77,6 +77,19 @@ public void testCreateWithIgnoreMissing() throws Exception { assertThat(renameProcessor.isIgnoreMissing(), equalTo(true)); } + public void testCreateWithOverrideTarget() throws Exception { + Map config = new HashMap<>(); + config.put("field", "old_field"); + config.put("target_field", "new_field"); + config.put("override_target", true); + String processorTag = randomAlphaOfLength(10); + RenameProcessor renameProcessor = factory.create(null, processorTag, null, config); + assertThat(renameProcessor.getTag(), equalTo(processorTag)); + assertThat(renameProcessor.getField().newInstance(Collections.emptyMap()).execute(), equalTo("old_field")); + assertThat(renameProcessor.getTargetField().newInstance(Collections.emptyMap()).execute(), equalTo("new_field")); + assertThat(renameProcessor.isOverrideTarget(), equalTo(true)); + } + public void testCreateNoFieldPresent() throws Exception { Map config = new HashMap<>(); config.put("target_field", "new_field"); diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RenameProcessorTests.java 
b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RenameProcessorTests.java index a600464371af8..ad5b46e924278 100644 --- a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RenameProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RenameProcessorTests.java @@ -59,7 +59,7 @@ public void testRename() throws Exception { do { newFieldName = RandomDocumentPicks.randomFieldName(random()); } while (RandomDocumentPicks.canAddField(newFieldName, ingestDocument) == false || newFieldName.equals(fieldName)); - Processor processor = createRenameProcessor(fieldName, newFieldName, false); + Processor processor = createRenameProcessor(fieldName, newFieldName, false, false); processor.execute(ingestDocument); assertThat(ingestDocument.getFieldValue(newFieldName, Object.class), equalTo(fieldValue)); } @@ -77,7 +77,7 @@ public void testRenameArrayElement() throws Exception { document.put("one", one); IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document); - Processor processor = createRenameProcessor("list.0", "item", false); + Processor processor = createRenameProcessor("list.0", "item", false, false); processor.execute(ingestDocument); Object actualObject = ingestDocument.getSourceAndMetadata().get("list"); assertThat(actualObject, instanceOf(List.class)); @@ -90,7 +90,7 @@ public void testRenameArrayElement() throws Exception { assertThat(actualObject, instanceOf(String.class)); assertThat(actualObject, equalTo("item1")); - processor = createRenameProcessor("list.0", "list.3", false); + processor = createRenameProcessor("list.0", "list.3", false, randomBoolean()); try { processor.execute(ingestDocument); fail("processor execute should have failed"); @@ -105,7 +105,7 @@ public void testRenameArrayElement() throws Exception { public void testRenameNonExistingField() throws Exception { IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>()); String fieldName = RandomDocumentPicks.randomFieldName(random()); - Processor processor = createRenameProcessor(fieldName, RandomDocumentPicks.randomFieldName(random()), false); + Processor processor = createRenameProcessor(fieldName, RandomDocumentPicks.randomFieldName(random()), false, false); try { processor.execute(ingestDocument); fail("processor execute should have failed"); @@ -114,7 +114,7 @@ public void testRenameNonExistingField() throws Exception { } // when using template snippet, the resolved field path maybe empty - processor = createRenameProcessor("", RandomDocumentPicks.randomFieldName(random()), false); + processor = createRenameProcessor("", RandomDocumentPicks.randomFieldName(random()), false, false); try { processor.execute(ingestDocument); fail("processor execute should have failed"); @@ -127,30 +127,36 @@ public void testRenameNonExistingFieldWithIgnoreMissing() throws Exception { IngestDocument originalIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>()); IngestDocument ingestDocument = new IngestDocument(originalIngestDocument); String fieldName = RandomDocumentPicks.randomFieldName(random()); - Processor processor = createRenameProcessor(fieldName, RandomDocumentPicks.randomFieldName(random()), true); + Processor processor = createRenameProcessor(fieldName, RandomDocumentPicks.randomFieldName(random()), true, false); processor.execute(ingestDocument); assertIngestDocument(originalIngestDocument, ingestDocument); // when using template snippet, the resolved field 
path maybe empty - processor = createRenameProcessor("", RandomDocumentPicks.randomFieldName(random()), true); + processor = createRenameProcessor("", RandomDocumentPicks.randomFieldName(random()), true, false); processor.execute(ingestDocument); assertIngestDocument(originalIngestDocument, ingestDocument); } public void testRenameNewFieldAlreadyExists() throws Exception { IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); - String fieldName = RandomDocumentPicks.randomExistingFieldName(random(), ingestDocument); - Processor processor = createRenameProcessor( - RandomDocumentPicks.randomExistingFieldName(random(), ingestDocument), - fieldName, - false - ); + String field = RandomDocumentPicks.randomExistingFieldName(random(), ingestDocument); + Object fieldValue = ingestDocument.getFieldValue(field, Object.class); + String targetField = RandomDocumentPicks.addRandomField(random(), ingestDocument, RandomDocumentPicks.randomFieldValue(random())); + + Processor processor = createRenameProcessor(field, targetField, false, false); try { processor.execute(ingestDocument); fail("processor execute should have failed"); } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), equalTo("field [" + fieldName + "] already exists")); + assertThat(e.getMessage(), equalTo("field [" + targetField + "] already exists")); } + + Processor processorWithOverrideTarget = createRenameProcessor(field, targetField, false, true); + + processorWithOverrideTarget.execute(ingestDocument); + assertThat(ingestDocument.hasField(field), equalTo(false)); + assertThat(ingestDocument.hasField(targetField), equalTo(true)); + assertThat(ingestDocument.getFieldValue(targetField, Object.class), equalTo(fieldValue)); } public void testRenameExistingFieldNullValue() throws Exception { @@ -158,7 +164,7 @@ public void testRenameExistingFieldNullValue() throws Exception { String fieldName = RandomDocumentPicks.randomFieldName(random()); ingestDocument.setFieldValue(fieldName, null); String newFieldName = randomValueOtherThanMany(ingestDocument::hasField, () -> RandomDocumentPicks.randomFieldName(random())); - Processor processor = createRenameProcessor(fieldName, newFieldName, false); + Processor processor = createRenameProcessor(fieldName, newFieldName, false, false); processor.execute(ingestDocument); if (newFieldName.startsWith(fieldName + '.')) { assertThat(ingestDocument.getFieldValue(fieldName, Object.class), instanceOf(Map.class)); @@ -182,7 +188,7 @@ public Object put(String key, Object value) { source.put("list", Collections.singletonList("item")); IngestDocument ingestDocument = new IngestDocument(source, Collections.emptyMap()); - Processor processor = createRenameProcessor("list", "new_field", false); + Processor processor = createRenameProcessor("list", "new_field", false, false); try { processor.execute(ingestDocument); fail("processor execute should have failed"); @@ -206,7 +212,7 @@ public Object remove(Object key) { source.put("list", Collections.singletonList("item")); IngestDocument ingestDocument = new IngestDocument(source, Collections.emptyMap()); - Processor processor = createRenameProcessor("list", "new_field", false); + Processor processor = createRenameProcessor("list", "new_field", false, false); try { processor.execute(ingestDocument); fail("processor execute should have failed"); @@ -221,12 +227,12 @@ public void testRenameLeafIntoBranch() throws Exception { Map source = new HashMap<>(); source.put("foo", "bar"); IngestDocument ingestDocument = new 
IngestDocument(source, Collections.emptyMap()); - Processor processor1 = createRenameProcessor("foo", "foo.bar", false); + Processor processor1 = createRenameProcessor("foo", "foo.bar", false, false); processor1.execute(ingestDocument); assertThat(ingestDocument.getFieldValue("foo", Map.class), equalTo(Collections.singletonMap("bar", "bar"))); assertThat(ingestDocument.getFieldValue("foo.bar", String.class), equalTo("bar")); - Processor processor2 = createRenameProcessor("foo.bar", "foo.bar.baz", false); + Processor processor2 = createRenameProcessor("foo.bar", "foo.bar.baz", false, false); processor2.execute(ingestDocument); assertThat( ingestDocument.getFieldValue("foo", Map.class), @@ -236,18 +242,19 @@ public void testRenameLeafIntoBranch() throws Exception { assertThat(ingestDocument.getFieldValue("foo.bar.baz", String.class), equalTo("bar")); // for fun lets try to restore it (which don't allow today) - Processor processor3 = createRenameProcessor("foo.bar.baz", "foo", false); + Processor processor3 = createRenameProcessor("foo.bar.baz", "foo", false, false); Exception e = expectThrows(IllegalArgumentException.class, () -> processor3.execute(ingestDocument)); assertThat(e.getMessage(), equalTo("field [foo] already exists")); } - private RenameProcessor createRenameProcessor(String field, String targetField, boolean ignoreMissing) { + private RenameProcessor createRenameProcessor(String field, String targetField, boolean ignoreMissing, boolean overrideTarget) { return new RenameProcessor( randomAlphaOfLength(10), null, new TestTemplateService.MockTemplateScript.Factory(field), new TestTemplateService.MockTemplateScript.Factory(targetField), - ignoreMissing + ignoreMissing, + overrideTarget ); } } diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/280_rename_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/280_rename_processor.yml index 96b2256bcc1dc..0ef658896ff0a 100644 --- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/280_rename_processor.yml +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/280_rename_processor.yml @@ -64,3 +64,42 @@ teardown: index: test id: 1 - match: { _source.message: "foo bar baz" } + +--- +"Test rename processor with override_target": + - skip: + version: " - 2.13.99" + reason: "introduced in 2.14.0" + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "rename" : { + "field" : "foo", + "target_field" : "bar", + "override_target" : true + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "my_pipeline" + body: { + foo: "foo", + bar: "bar" + } + + - do: + get: + index: test + id: 1 + - match: { _source: { "bar": "foo" } } From ae49fd29ee81d8ba64baa0a38a58bb639a446205 Mon Sep 17 00:00:00 2001 From: Sachin Kale Date: Tue, 23 Apr 2024 17:42:52 +0530 Subject: [PATCH 24/37] Handle OpenSearchRejectedExecutionException while retrying refresh (#13301) Signed-off-by: Sachin Kale --- .../ReleasableRetryableRefreshListener.java | 7 ++++ ...leasableRetryableRefreshListenerTests.java | 42 +++++++++++++++++-- 2 files changed, 46 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/opensearch/index/shard/ReleasableRetryableRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/ReleasableRetryableRefreshListener.java index 757275932c5f1..80daefc4482fc 100644 --- 
a/server/src/main/java/org/opensearch/index/shard/ReleasableRetryableRefreshListener.java +++ b/server/src/main/java/org/opensearch/index/shard/ReleasableRetryableRefreshListener.java @@ -13,6 +13,7 @@ import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.threadpool.ThreadPool; import java.io.IOException; @@ -129,6 +130,12 @@ private void scheduleRetry(TimeValue interval, String retryThreadPoolName, boole ); scheduled = true; getLogger().info("Scheduled retry with didRefresh={}", didRefresh); + } catch (OpenSearchRejectedExecutionException e) { + if (e.isExecutorShutdown()) { + getLogger().info("Scheduling retry with didRefresh={} failed due to executor shut down", didRefresh); + } else { + throw e; + } } finally { if (scheduled == false) { retryScheduled.set(false); diff --git a/server/src/test/java/org/opensearch/index/shard/ReleasableRetryableRefreshListenerTests.java b/server/src/test/java/org/opensearch/index/shard/ReleasableRetryableRefreshListenerTests.java index a0641c365a2a1..e0ad09efac367 100644 --- a/server/src/test/java/org/opensearch/index/shard/ReleasableRetryableRefreshListenerTests.java +++ b/server/src/test/java/org/opensearch/index/shard/ReleasableRetryableRefreshListenerTests.java @@ -316,7 +316,32 @@ protected Logger getLogger() { public void testScheduleRetryAfterClose() throws Exception { // This tests that once the listener has been closed, even the retries would not be scheduled. final AtomicLong runCount = new AtomicLong(); - ReleasableRetryableRefreshListener testRefreshListener = new ReleasableRetryableRefreshListener(threadPool) { + ReleasableRetryableRefreshListener testRefreshListener = getRetryableRefreshListener(runCount); + Thread thread1 = new Thread(() -> { + try { + testRefreshListener.afterRefresh(true); + } catch (IOException e) { + throw new AssertionError(e); + } + }); + Thread thread2 = new Thread(() -> { + try { + Thread.sleep(500); + testRefreshListener.drainRefreshes(); + } catch (InterruptedException e) { + throw new AssertionError(e); + } + }); + thread1.start(); + thread2.start(); + thread1.join(); + thread2.join(); + assertBusy(() -> assertEquals(1, runCount.get())); + assertRefreshListenerClosed(testRefreshListener); + } + + private ReleasableRetryableRefreshListener getRetryableRefreshListener(AtomicLong runCount) { + return new ReleasableRetryableRefreshListener(threadPool) { @Override protected boolean performAfterRefreshWithPermit(boolean didRefresh) { try { @@ -341,6 +366,11 @@ protected String getRetryThreadPoolName() { return ThreadPool.Names.REMOTE_REFRESH_RETRY; } + @Override + protected boolean isRetryEnabled() { + return true; + } + @Override protected TimeValue getNextRetryInterval() { try { @@ -351,6 +381,12 @@ protected TimeValue getNextRetryInterval() { return TimeValue.timeValueMillis(100); } }; + } + + public void testScheduleRetryAfterThreadpoolShutdown() throws Exception { + // This tests that once the thread-pool is shut down, the exception is handled. 
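+        // A hedged sketch of the expected flow, using only names from this diff: thread1 drives
+        // afterRefresh(true) into the retry path, thread2 shuts the thread pool down, and the next
+        // threadPool.schedule(...) inside scheduleRetry throws OpenSearchRejectedExecutionException
+        // with isExecutorShutdown() == true, which the listener logs and swallows, so no further
+        // retry is scheduled.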
+ final AtomicLong runCount = new AtomicLong(); + ReleasableRetryableRefreshListener testRefreshListener = getRetryableRefreshListener(runCount); Thread thread1 = new Thread(() -> { try { testRefreshListener.afterRefresh(true); @@ -361,7 +397,7 @@ protected TimeValue getNextRetryInterval() { Thread thread2 = new Thread(() -> { try { Thread.sleep(500); - testRefreshListener.drainRefreshes(); + threadPool.shutdown(); } catch (InterruptedException e) { throw new AssertionError(e); } @@ -371,7 +407,7 @@ protected TimeValue getNextRetryInterval() { thread1.join(); thread2.join(); assertBusy(() -> assertEquals(1, runCount.get())); - assertRefreshListenerClosed(testRefreshListener); + assertFalse(testRefreshListener.getRetryScheduledStatus()); } public void testConcurrentScheduleRetry() throws Exception { From f41db2c8f78ae2365f428e692770be433a54de8a Mon Sep 17 00:00:00 2001 From: Sai Medhini Reddy Maryada <117196660+saimedhi@users.noreply.github.com> Date: Tue, 23 Apr 2024 07:18:47 -0700 Subject: [PATCH 25/37] Fixed skip version for Search Pipeline integration tests (#13344) Signed-off-by: saimedhi --- .../test/search_pipeline/10_basic.yml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search_pipeline/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search_pipeline/10_basic.yml index 60c0706415bc2..a4877975a0052 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search_pipeline/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search_pipeline/10_basic.yml @@ -1,8 +1,8 @@ --- "Test basic pipeline crud": - skip: - version: " - 2.6.99" - reason: "Added in 2.7.0" + version: " - 2.8.99" + reason: "Added in 2.9.0" - do: search_pipeline.put: id: "my_pipeline" @@ -32,8 +32,8 @@ --- "Test Put Versioned Pipeline": - skip: - version: " - 2.6.99" - reason: "Added in 2.7.0" + version: " - 2.8.99" + reason: "Added in 2.9.0" - do: search_pipeline.put: id: "my_pipeline" @@ -125,8 +125,8 @@ --- "Test Get All Pipelines": - skip: - version: " - 2.6.99" - reason: "Added in 2.7.0" + version: " - 2.8.99" + reason: "Added in 2.9.0" - do: search_pipeline.put: id: "first_pipeline" @@ -152,8 +152,8 @@ --- "Test invalid config": - skip: - version: " - 2.6.99" - reason: "Added in 2.7.0" + version: " - 2.8.99" + reason: "Added in 2.9.0" - do: catch: /parse_exception/ search_pipeline.put: From ab7e914c3956ac206cf269ca345574882770a283 Mon Sep 17 00:00:00 2001 From: gaobinlong Date: Tue, 23 Apr 2024 22:33:28 +0800 Subject: [PATCH 26/37] Fix from and size parameter can be negative when searching (#13047) * Fix from and size parameter can be negative when searching Signed-off-by: Gao Binlong * Add changelog Signed-off-by: Gao Binlong * fix test failure Signed-off-by: Gao Binlong * Fix test failure Signed-off-by: Gao Binlong * Optimize the code Signed-off-by: Gao Binlong --------- Signed-off-by: Gao Binlong --- CHANGELOG.md | 1 + .../test/search/360_from_and_size.yml | 113 ++++++++++++++++++ .../action/search/SearchRequest.java | 2 +- .../rest/action/search/RestSearchAction.java | 12 +- .../search/builder/SearchSourceBuilder.java | 6 +- .../builder/SearchSourceBuilderTests.java | 38 +++++- 6 files changed, 161 insertions(+), 11 deletions(-) create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/search/360_from_and_size.yml diff --git a/CHANGELOG.md b/CHANGELOG.md index e1184d48deb59..c0e966c49506c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -61,6 +61,7 @@ The format is 
based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix issue with feature flags where default value may not be honored ([#12849](https://github.com/opensearch-project/OpenSearch/pull/12849)) - Fix UOE While building Exists query for nested search_as_you_type field ([#12048](https://github.com/opensearch-project/OpenSearch/pull/12048)) - Client with Java 8 runtime and Apache HttpClient 5 Transport fails with java.lang.NoSuchMethodError: java.nio.ByteBuffer.flip()Ljava/nio/ByteBuffer ([#13100](https://github.com/opensearch-project/opensearch-java/pull/13100)) +- Fix from and size parameter can be negative when searching ([#13047](https://github.com/opensearch-project/OpenSearch/pull/13047)) - Enabled mockTelemetryPlugin for IT and fixed OOM issues ([#13054](https://github.com/opensearch-project/OpenSearch/pull/13054)) - Fix implement mark() and markSupported() in class FilterStreamInput ([#13098](https://github.com/opensearch-project/OpenSearch/pull/13098)) - Fix snapshot _status API to return correct status for partial snapshots ([#12812](https://github.com/opensearch-project/OpenSearch/pull/12812)) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/360_from_and_size.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/360_from_and_size.yml new file mode 100644 index 0000000000000..95bcb9e5326cb --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/360_from_and_size.yml @@ -0,0 +1,113 @@ +setup: + - do: + indices.create: + index: test_1 + - do: + index: + index: test_1 + id: 1 + body: { foo: bar } + - do: + index: + index: test_1 + id: 2 + body: { foo: bar } + - do: + index: + index: test_1 + id: 3 + body: { foo: bar } + + - do: + index: + index: test_1 + id: 4 + body: { foo: bar } + - do: + indices.refresh: + index: [test_1] + +--- +teardown: + - do: + indices.delete: + index: test_1 + ignore: 404 + +--- +"Throws exception if from or size query parameter is negative": + - skip: + version: " - 2.99.99" + reason: "fixed in 3.0.0" + - do: + catch: '/\[from\] parameter cannot be negative, found \[-5\]/' + search: + index: test_1 + from: -5 + size: 10 + body: + query: + match: + foo: bar + + - do: + catch: '/\[size\] parameter cannot be negative, found \[-1\]/' + search: + index: test_1 + from: 0 + size: -1 + body: + query: + match: + foo: bar + + - do: + search: + index: test_1 + from: 0 + size: 10 + body: + query: + match: + foo: bar + + - match: {hits.total.value: 4} + +--- +"Throws exception if from or size request body parameter is negative": + - skip: + version: " - 2.99.99" + reason: "fixed in 3.0.0" + - do: + catch: '/\[from\] parameter cannot be negative, found \[-5\]/' + search: + index: test_1 + body: + from: -5 + size: 10 + query: + match: + foo: bar + + - do: + catch: '/\[size\] parameter cannot be negative, found \[-1\]/' + search: + index: test_1 + body: + from: 0 + size: -1 + query: + match: + foo: bar + + - do: + search: + index: test_1 + body: + from: 0 + size: 10 + query: + match: + foo: bar + + - match: {hits.total.value: 4} diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequest.java b/server/src/main/java/org/opensearch/action/search/SearchRequest.java index 3b8a6937815aa..f3d9f77e2394c 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequest.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequest.java @@ -497,7 +497,7 @@ public PointInTimeBuilder pointInTimeBuilder() { } /** - * The tye of search to execute. 
+ * The type of search to execute. */ public SearchType searchType() { return searchType; diff --git a/server/src/main/java/org/opensearch/rest/action/search/RestSearchAction.java b/server/src/main/java/org/opensearch/rest/action/search/RestSearchAction.java index 80dc34c4d5d68..3a6b45013e892 100644 --- a/server/src/main/java/org/opensearch/rest/action/search/RestSearchAction.java +++ b/server/src/main/java/org/opensearch/rest/action/search/RestSearchAction.java @@ -50,6 +50,7 @@ import org.opensearch.rest.action.RestCancellableNodeClient; import org.opensearch.rest.action.RestStatusToXContentListener; import org.opensearch.search.Scroll; +import org.opensearch.search.SearchService; import org.opensearch.search.builder.SearchSourceBuilder; import org.opensearch.search.fetch.StoredFieldsContext; import org.opensearch.search.fetch.subphase.FetchSourceContext; @@ -235,13 +236,12 @@ private static void parseSearchSource(final SearchSourceBuilder searchSourceBuil searchSourceBuilder.query(queryBuilder); } - int from = request.paramAsInt("from", -1); - if (from != -1) { - searchSourceBuilder.from(from); + if (request.hasParam("from")) { + searchSourceBuilder.from(request.paramAsInt("from", SearchService.DEFAULT_FROM)); } - int size = request.paramAsInt("size", -1); - if (size != -1) { - setSize.accept(size); + + if (request.hasParam("size")) { + setSize.accept(request.paramAsInt("size", SearchService.DEFAULT_SIZE)); } if (request.hasParam("explain")) { diff --git a/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java b/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java index 182350c22f697..07248a0719c3a 100644 --- a/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java +++ b/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java @@ -418,7 +418,7 @@ public QueryBuilder postFilter() { */ public SearchSourceBuilder from(int from) { if (from < 0) { - throw new IllegalArgumentException("[from] parameter cannot be negative"); + throw new IllegalArgumentException("[from] parameter cannot be negative, found [" + from + "]"); } this.from = from; return this; @@ -1215,9 +1215,9 @@ public void parseXContent(XContentParser parser, boolean checkTrailingTokens) th currentFieldName = parser.currentName(); } else if (token.isValue()) { if (FROM_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - from = parser.intValue(); + from(parser.intValue()); } else if (SIZE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - size = parser.intValue(); + size(parser.intValue()); } else if (TIMEOUT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { timeout = TimeValue.parseTimeValue(parser.text(), null, TIMEOUT_FIELD.getPreferredName()); } else if (TERMINATE_AFTER_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { diff --git a/server/src/test/java/org/opensearch/search/builder/SearchSourceBuilderTests.java b/server/src/test/java/org/opensearch/search/builder/SearchSourceBuilderTests.java index eea7b1829e9b0..5b1035e24185d 100644 --- a/server/src/test/java/org/opensearch/search/builder/SearchSourceBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/builder/SearchSourceBuilderTests.java @@ -571,7 +571,7 @@ public void testParseIndicesBoost() throws IOException { public void testNegativeFromErrors() { IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> new SearchSourceBuilder().from(-2)); - assertEquals("[from] parameter cannot be 
negative", expected.getMessage()); + assertEquals("[from] parameter cannot be negative, found [-2]", expected.getMessage()); } public void testNegativeSizeErrors() { @@ -582,6 +582,42 @@ public void testNegativeSizeErrors() { assertEquals("[size] parameter cannot be negative, found [-1]", expected.getMessage()); } + public void testParseFromAndSize() throws IOException { + int negativeFrom = randomIntBetween(-100, -1); + String restContent = " { \"from\": \"" + negativeFrom + "\"}"; + try (XContentParser parser = createParser(JsonXContent.jsonXContent, restContent)) { + IllegalArgumentException expected = expectThrows( + IllegalArgumentException.class, + () -> SearchSourceBuilder.fromXContent(parser) + ); + assertEquals("[from] parameter cannot be negative, found [" + negativeFrom + "]", expected.getMessage()); + } + + int validFrom = randomIntBetween(0, 100); + restContent = " { \"from\": \"" + validFrom + "\"}"; + try (XContentParser parser = createParser(JsonXContent.jsonXContent, restContent)) { + SearchSourceBuilder searchSourceBuilder = SearchSourceBuilder.fromXContent(parser); + assertEquals(validFrom, searchSourceBuilder.from()); + } + + int negativeSize = randomIntBetween(-100, -1); + restContent = " { \"size\": \"" + negativeSize + "\"}"; + try (XContentParser parser = createParser(JsonXContent.jsonXContent, restContent)) { + IllegalArgumentException expected = expectThrows( + IllegalArgumentException.class, + () -> SearchSourceBuilder.fromXContent(parser) + ); + assertEquals("[size] parameter cannot be negative, found [" + negativeSize + "]", expected.getMessage()); + } + + int validSize = randomIntBetween(0, 100); + restContent = " { \"size\": \"" + validSize + "\"}"; + try (XContentParser parser = createParser(JsonXContent.jsonXContent, restContent)) { + SearchSourceBuilder searchSourceBuilder = SearchSourceBuilder.fromXContent(parser); + assertEquals(validSize, searchSourceBuilder.size()); + } + } + private void assertIndicesBoostParseErrorMessage(String restContent, String expectedErrorMessage) throws IOException { try (XContentParser parser = createParser(JsonXContent.jsonXContent, restContent)) { ParsingException e = expectThrows(ParsingException.class, () -> SearchSourceBuilder.fromXContent(parser)); From 97e319109b2159e349269e9188aeb94c72165685 Mon Sep 17 00:00:00 2001 From: Craig Perkins Date: Tue, 23 Apr 2024 10:50:15 -0400 Subject: [PATCH 27/37] Bump com.github.spullara.mustache.java:compiler from 0.9.10 to 0.9.11 (#13329) * Bump com.github.spullara.mustache.java:compiler from 0.9.10 to 0.9.11 Signed-off-by: Craig Perkins * Add to CHANGELOG Signed-off-by: Craig Perkins * Fix error in yamlRestTest Signed-off-by: Craig Perkins --------- Signed-off-by: Craig Perkins --- CHANGELOG.md | 1 + modules/lang-mustache/build.gradle | 2 +- modules/lang-mustache/licenses/compiler-0.9.10.jar.sha1 | 1 - modules/lang-mustache/licenses/compiler-0.9.11.jar.sha1 | 1 + .../test/lang_mustache/20_render_search_template.yml | 4 ++-- 5 files changed, 5 insertions(+), 4 deletions(-) delete mode 100644 modules/lang-mustache/licenses/compiler-0.9.10.jar.sha1 create mode 100644 modules/lang-mustache/licenses/compiler-0.9.11.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index c0e966c49506c..22c3e0d787f58 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -42,6 +42,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Update google dependencies in repository-gcs and discovery-gce ([#13213](https://github.com/opensearch-project/OpenSearch/pull/13213)) - Bump 
`com.google.apis:google-api-services-compute` from v1-rev235-1.25.0 to v1-rev20240407-2.0.0 ([#13333](https://github.com/opensearch-project/OpenSearch/pull/13333)) - Bump `commons-cli:commons-cli` from 1.6.0 to 1.7.0 ([#13331](https://github.com/opensearch-project/OpenSearch/pull/13331)) +- Bump `com.github.spullara.mustache.java:compiler` from 0.9.10 to 0.9.11 ([#13329](https://github.com/opensearch-project/OpenSearch/pull/13329)) ### Changed - [BWC and API enforcement] Enforcing the presence of API annotations at build time ([#12872](https://github.com/opensearch-project/OpenSearch/pull/12872)) diff --git a/modules/lang-mustache/build.gradle b/modules/lang-mustache/build.gradle index 14eafd8d43e13..4bf75b593ebf2 100644 --- a/modules/lang-mustache/build.gradle +++ b/modules/lang-mustache/build.gradle @@ -38,7 +38,7 @@ opensearchplugin { } dependencies { - api "com.github.spullara.mustache.java:compiler:0.9.10" + api "com.github.spullara.mustache.java:compiler:0.9.11" } restResources { diff --git a/modules/lang-mustache/licenses/compiler-0.9.10.jar.sha1 b/modules/lang-mustache/licenses/compiler-0.9.10.jar.sha1 deleted file mode 100644 index 6336318c2ce1a..0000000000000 --- a/modules/lang-mustache/licenses/compiler-0.9.10.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6111ae24e3be9ecbd75f5fe908583fc14b4f0174 \ No newline at end of file diff --git a/modules/lang-mustache/licenses/compiler-0.9.11.jar.sha1 b/modules/lang-mustache/licenses/compiler-0.9.11.jar.sha1 new file mode 100644 index 0000000000000..a77675488b2e0 --- /dev/null +++ b/modules/lang-mustache/licenses/compiler-0.9.11.jar.sha1 @@ -0,0 +1 @@ +1bce858aca4f0ce93fdb939de8c8474431c06322 \ No newline at end of file diff --git a/modules/lang-mustache/src/yamlRestTest/resources/rest-api-spec/test/lang_mustache/20_render_search_template.yml b/modules/lang-mustache/src/yamlRestTest/resources/rest-api-spec/test/lang_mustache/20_render_search_template.yml index 946b63a65d923..a0f6828e66d79 100644 --- a/modules/lang-mustache/src/yamlRestTest/resources/rest-api-spec/test/lang_mustache/20_render_search_template.yml +++ b/modules/lang-mustache/src/yamlRestTest/resources/rest-api-spec/test/lang_mustache/20_render_search_template.yml @@ -47,7 +47,7 @@ - match: { template_output.aggs.my_terms.terms.field: "my_other_field" } - do: - catch: /Improperly.closed.variable.in.query-template/ + catch: /Improperly.closed.variable:.my_value.in.query-template/ render_search_template: body: { "source": { "query": { "match": { "text": "{{{my_value}}" } }, "aggs": { "my_terms": { "terms": { "field": "{{my_field}}" } } } }, "params": { "my_value": "bar", "my_field": "field1" } } --- @@ -99,7 +99,7 @@ - match: { template_output.size: 100 } - do: - catch: /Improperly.closed.variable.in.query-template/ + catch: /Improperly.closed.variable:.my_value.in.query-template/ render_search_template: body: { "source": "{ \"query\": { \"match\": { \"text\": \"{{{my_value}}\" } }, \"size\": {{my_size}} }", "params": { "my_value": "bar", "my_size": 100 } } From 3ea0a31f6f613ddd5b7bde0053195d5b212c813d Mon Sep 17 00:00:00 2001 From: Ashish Singh Date: Tue, 23 Apr 2024 20:48:28 +0530 Subject: [PATCH 28/37] Upload remote paths during index creation or full cluster upload (#13150) * Upload remote paths during index creation or full cluster upload Signed-off-by: Ashish Singh --- .../RemoteStoreUploadIndexPathIT.java | 108 ++++++ .../transfer/RemoteTransferContainer.java | 10 +- .../remote/IndexMetadataUploadListener.java | 49 +++ .../remote/RemoteClusterStateService.java | 130 ++++++-- 
.../index/remote/RemoteIndexPath.java | 159 +++++++++ .../index/remote/RemoteIndexPathUploader.java | 281 ++++++++++++++++ .../index/remote/RemoteStorePathStrategy.java | 4 + .../RemoteStorePathStrategyResolver.java | 2 + .../main/java/org/opensearch/node/Node.java | 17 +- .../blobstore/BaseBlobStoreFormat.java | 130 ++++++++ .../blobstore/ChecksumBlobStoreFormat.java | 74 +---- .../blobstore/ConfigBlobStoreFormat.java | 73 ++++ .../RemoteTransferContainerTests.java | 12 +- .../GatewayMetaStatePersistedStateTests.java | 24 +- .../RemoteClusterStateServiceTests.java | 10 +- .../index/remote/RemoteIndexPathTests.java | 140 ++++++++ .../remote/RemoteIndexPathUploaderTests.java | 314 ++++++++++++++++++ 17 files changed, 1409 insertions(+), 128 deletions(-) create mode 100644 server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreUploadIndexPathIT.java create mode 100644 server/src/main/java/org/opensearch/gateway/remote/IndexMetadataUploadListener.java create mode 100644 server/src/main/java/org/opensearch/index/remote/RemoteIndexPath.java create mode 100644 server/src/main/java/org/opensearch/index/remote/RemoteIndexPathUploader.java create mode 100644 server/src/main/java/org/opensearch/repositories/blobstore/BaseBlobStoreFormat.java create mode 100644 server/src/main/java/org/opensearch/repositories/blobstore/ConfigBlobStoreFormat.java create mode 100644 server/src/test/java/org/opensearch/index/remote/RemoteIndexPathTests.java create mode 100644 server/src/test/java/org/opensearch/index/remote/RemoteIndexPathUploaderTests.java diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreUploadIndexPathIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreUploadIndexPathIT.java new file mode 100644 index 0000000000000..c39cec96aa476 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreUploadIndexPathIT.java @@ -0,0 +1,108 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.remotestore; + +import org.opensearch.action.admin.indices.delete.DeleteIndexRequest; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.util.FileSystemUtils; +import org.opensearch.index.remote.RemoteIndexPath; +import org.opensearch.index.remote.RemoteStoreEnums; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.io.IOException; +import java.util.Locale; +import java.util.concurrent.ExecutionException; + +import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING; +import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class RemoteStoreUploadIndexPathIT extends RemoteStoreBaseIntegTestCase { + + private final String INDEX_NAME = "remote-store-test-idx-1"; + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true).build(); + } + + /** + * Checks that the remote index path file gets created for the intended remote store path type and does not get created + * wherever not required. + */ + public void testRemoteIndexPathFileCreation() throws ExecutionException, InterruptedException, IOException { + String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(); + internalCluster().startDataOnlyNodes(2); + + // Case 1 - Hashed_prefix, we would need the remote index path file to be created. + client(clusterManagerNode).admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), RemoteStoreEnums.PathType.HASHED_PREFIX) + ) + .get(); + + createIndex(INDEX_NAME, remoteStoreIndexSettings(0, 1)); + validateRemoteIndexPathFile(true); + assertAcked(client().admin().indices().delete(new DeleteIndexRequest(INDEX_NAME)).get()); + FileSystemUtils.deleteSubDirectories(translogRepoPath); + FileSystemUtils.deleteSubDirectories(segmentRepoPath); + + // Case 2 - Hashed_infix, we would not have the remote index path file created here. + client(clusterManagerNode).admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), RemoteStoreEnums.PathType.HASHED_INFIX) + ) + .get(); + createIndex(INDEX_NAME, remoteStoreIndexSettings(0, 1)); + validateRemoteIndexPathFile(false); + assertAcked(client().admin().indices().delete(new DeleteIndexRequest(INDEX_NAME)).get()); + + // Case 3 - fixed, we would not have the remote index path file created here either. 
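+        // (Hedged rationale for these cases: only HASHED_PREFIX places the hashed component
+        // in front of the repository base path, so only it needs the remote-index-path file
+        // to make shard paths discoverable later; FIXED and HASHED_INFIX keep the base path
+        // as the leading component and need no such file.)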
+ client(clusterManagerNode).admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), RemoteStoreEnums.PathType.FIXED)) + .get(); + createIndex(INDEX_NAME, remoteStoreIndexSettings(0, 1)); + validateRemoteIndexPathFile(false); + assertAcked(client().admin().indices().delete(new DeleteIndexRequest(INDEX_NAME)).get()); + + } + + private void validateRemoteIndexPathFile(boolean exists) { + String indexUUID = client().admin() + .indices() + .prepareGetSettings(INDEX_NAME) + .get() + .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID); + + assertEquals(exists, FileSystemUtils.exists(translogRepoPath.resolve(RemoteIndexPath.DIR))); + assertEquals( + exists, + FileSystemUtils.exists( + translogRepoPath.resolve(RemoteIndexPath.DIR) + .resolve(String.format(Locale.ROOT, RemoteIndexPath.FILE_NAME_FORMAT, indexUUID)) + ) + ); + assertEquals(exists, FileSystemUtils.exists(segmentRepoPath.resolve(RemoteIndexPath.DIR))); + assertEquals( + exists, + FileSystemUtils.exists( + segmentRepoPath.resolve(RemoteIndexPath.DIR) + .resolve(String.format(Locale.ROOT, RemoteIndexPath.FILE_NAME_FORMAT, indexUUID)) + ) + ); + } +} diff --git a/server/src/main/java/org/opensearch/common/blobstore/transfer/RemoteTransferContainer.java b/server/src/main/java/org/opensearch/common/blobstore/transfer/RemoteTransferContainer.java index cd2ef22327ebb..cbd1852202d1c 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/transfer/RemoteTransferContainer.java +++ b/server/src/main/java/org/opensearch/common/blobstore/transfer/RemoteTransferContainer.java @@ -52,7 +52,7 @@ public class RemoteTransferContainer implements Closeable { private final String remoteFileName; private final boolean failTransferIfFileExists; private final WritePriority writePriority; - private final long expectedChecksum; + private final Long expectedChecksum; private final OffsetRangeInputStreamSupplier offsetRangeInputStreamSupplier; private final boolean isRemoteDataIntegritySupported; private final AtomicBoolean readBlock = new AtomicBoolean(); @@ -79,7 +79,7 @@ public RemoteTransferContainer( boolean failTransferIfFileExists, WritePriority writePriority, OffsetRangeInputStreamSupplier offsetRangeInputStreamSupplier, - long expectedChecksum, + Long expectedChecksum, boolean isRemoteDataIntegritySupported ) { this( @@ -115,7 +115,7 @@ public RemoteTransferContainer( boolean failTransferIfFileExists, WritePriority writePriority, OffsetRangeInputStreamSupplier offsetRangeInputStreamSupplier, - long expectedChecksum, + Long expectedChecksum, boolean isRemoteDataIntegritySupported, Map metadata ) { @@ -230,7 +230,7 @@ private LocalStreamSupplier getMultipartStreamSupplier( } private boolean isRemoteDataIntegrityCheckPossible() { - return isRemoteDataIntegritySupported; + return isRemoteDataIntegritySupported && Objects.nonNull(expectedChecksum); } private void finalizeUpload(boolean uploadSuccessful) throws IOException { @@ -238,7 +238,7 @@ private void finalizeUpload(boolean uploadSuccessful) throws IOException { return; } - if (uploadSuccessful) { + if (uploadSuccessful && Objects.nonNull(expectedChecksum)) { long actualChecksum = getActualChecksum(); if (actualChecksum != expectedChecksum) { throw new CorruptIndexException( diff --git a/server/src/main/java/org/opensearch/gateway/remote/IndexMetadataUploadListener.java b/server/src/main/java/org/opensearch/gateway/remote/IndexMetadataUploadListener.java new file mode 100644 index 
0000000000000..f9158c9260747
--- /dev/null
+++ b/server/src/main/java/org/opensearch/gateway/remote/IndexMetadataUploadListener.java
@@ -0,0 +1,49 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.gateway.remote;
+
+import org.opensearch.cluster.metadata.IndexMetadata;
+import org.opensearch.core.action.ActionListener;
+import org.opensearch.threadpool.ThreadPool;
+
+import java.util.List;
+import java.util.Objects;
+import java.util.concurrent.ExecutorService;
+
+/**
+ * Hook for running code that needs to be executed before the upload of index metadata. Here we have introduced a hook
+ * for index creation (also triggered after enabling the remote cluster state for the first time). The listener
+ * is intended to be run in parallel and asynchronously with the index metadata upload.
+ *
+ * @opensearch.internal
+ */
+public abstract class IndexMetadataUploadListener {
+
+    private final ExecutorService executorService;
+
+    public IndexMetadataUploadListener(ThreadPool threadPool, String threadPoolName) {
+        Objects.requireNonNull(threadPool);
+        Objects.requireNonNull(threadPoolName);
+        assert ThreadPool.THREAD_POOL_TYPES.containsKey(threadPoolName) && ThreadPool.Names.SAME.equals(threadPoolName) == false;
+        this.executorService = threadPool.executor(threadPoolName);
+    }
+
+    /**
+     * Runs before the upload of index metadata for new indexes (or the first-time upload). The caller is expected to trigger
+     * onSuccess or onFailure of the {@code ActionListener}.
+     *
+     * @param indexMetadataList list of index metadata of new indexes (or first-time index metadata upload).
+     * @param actionListener listener to be invoked on success or failure.
+     */
+    public final void onNewIndexUpload(List<IndexMetadata> indexMetadataList, ActionListener<Void> actionListener) {
+        executorService.execute(() -> doOnNewIndexUpload(indexMetadataList, actionListener));
+    }
+
+    protected abstract void doOnNewIndexUpload(List<IndexMetadata> indexMetadataList, ActionListener<Void> actionListener);
+}
diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java
index c892b475d71da..d2f927c827e5b 100644
--- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java
+++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java
@@ -160,6 +160,7 @@ public class RemoteClusterStateService implements Closeable {
     private final Settings settings;
     private final LongSupplier relativeTimeNanosSupplier;
     private final ThreadPool threadpool;
+    private final List<IndexMetadataUploadListener> indexMetadataUploadListeners;
     private BlobStoreRepository blobStoreRepository;
     private BlobStoreTransferService blobStoreTransferService;
     private volatile TimeValue slowWriteLoggingThreshold;
@@ -177,6 +178,7 @@ public class RemoteClusterStateService implements Closeable {
     // ToXContent Params with gateway mode.
     // We are using gateway context mode to persist all custom metadata.
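     // (Hedged aside: Metadata.CONTEXT_MODE_GATEWAY is assumed to be the XContent context that
     // also serializes custom metadata sections the API context would omit, which is why
     // FORMAT_PARAMS below is built with it.)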
public static final ToXContent.Params FORMAT_PARAMS; + static { Map params = new HashMap<>(1); params.put(Metadata.CONTEXT_MODE_PARAM, Metadata.CONTEXT_MODE_GATEWAY); @@ -189,7 +191,8 @@ public RemoteClusterStateService( Settings settings, ClusterSettings clusterSettings, LongSupplier relativeTimeNanosSupplier, - ThreadPool threadPool + ThreadPool threadPool, + List indexMetadataUploadListeners ) { assert isRemoteStoreClusterStateEnabled(settings) : "Remote cluster state is not enabled"; this.nodeId = nodeId; @@ -206,6 +209,7 @@ public RemoteClusterStateService( clusterSettings.addSettingsUpdateConsumer(GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING, this::setGlobalMetadataUploadTimeout); clusterSettings.addSettingsUpdateConsumer(METADATA_MANIFEST_UPLOAD_TIMEOUT_SETTING, this::setMetadataManifestUploadTimeout); this.remoteStateStats = new RemotePersistenceStats(); + this.indexMetadataUploadListeners = indexMetadataUploadListeners; } private BlobStoreTransferService getBlobStoreTransferService() { @@ -233,10 +237,12 @@ public ClusterMetadataManifest writeFullMetadata(ClusterState clusterState, Stri // Write globalMetadata String globalMetadataFile = writeGlobalMetadata(clusterState); + List toUpload = new ArrayList<>(clusterState.metadata().indices().values()); // any validations before/after upload ? final List allUploadedIndexMetadata = writeIndexMetadataParallel( clusterState, - new ArrayList<>(clusterState.metadata().indices().values()) + toUpload, + ClusterState.UNKNOWN_UUID.equals(previousClusterUUID) ? toUpload : Collections.emptyList() ); final ClusterMetadataManifest manifest = uploadManifest( clusterState, @@ -313,7 +319,7 @@ public ClusterMetadataManifest writeIncrementalMetadata( .collect(Collectors.toMap(UploadedIndexMetadata::getIndexName, Function.identity())); List toUpload = new ArrayList<>(); - + List newIndexMetadataList = new ArrayList<>(); for (final IndexMetadata indexMetadata : clusterState.metadata().indices().values()) { final Long previousVersion = previousStateIndexMetadataVersionByName.get(indexMetadata.getIndex().getName()); if (previousVersion == null || indexMetadata.getVersion() != previousVersion) { @@ -329,9 +335,13 @@ public ClusterMetadataManifest writeIncrementalMetadata( numIndicesUnchanged++; } previousStateIndexMetadataVersionByName.remove(indexMetadata.getIndex().getName()); + // Adding the indexMetadata to newIndexMetadataList if there is no previous version present for the index. + if (previousVersion == null) { + newIndexMetadataList.add(indexMetadata); + } } - List uploadedIndexMetadataList = writeIndexMetadataParallel(clusterState, toUpload); + List uploadedIndexMetadataList = writeIndexMetadataParallel(clusterState, toUpload, newIndexMetadataList); uploadedIndexMetadataList.forEach( uploadedIndexMetadata -> allUploadedIndexMetadata.put(uploadedIndexMetadata.getIndexName(), uploadedIndexMetadata) ); @@ -436,13 +446,18 @@ private String writeGlobalMetadata(ClusterState clusterState) throws IOException * Uploads provided IndexMetadata's to remote store in parallel. The call is blocking so the method waits for upload to finish and then return. 
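     * <p>A hedged sketch of the mechanics visible in this diff: uploads fan out through
     * writeIndexMetadataAsync plus the registered IndexMetadataUploadListeners, are joined on a
     * CountDownLatch sized to toUpload.size() + indexMetadataUploadListeners.size(), and any
     * failures collect in a synchronized exceptionList that is surfaced as a RemoteStateTransferException.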
* * @param clusterState current ClusterState - * @param toUpload list of IndexMetadata to upload + * @param toUpload list of IndexMetadata to upload * @return {@code List} list of IndexMetadata uploaded to remote */ - private List writeIndexMetadataParallel(ClusterState clusterState, List toUpload) - throws IOException { - List exceptionList = Collections.synchronizedList(new ArrayList<>(toUpload.size())); - final CountDownLatch latch = new CountDownLatch(toUpload.size()); + private List writeIndexMetadataParallel( + ClusterState clusterState, + List toUpload, + List newIndexMetadataList + ) throws IOException { + assert Objects.nonNull(indexMetadataUploadListeners) : "indexMetadataUploadListeners can not be null"; + int latchCount = toUpload.size() + indexMetadataUploadListeners.size(); + List exceptionList = Collections.synchronizedList(new ArrayList<>(latchCount)); + final CountDownLatch latch = new CountDownLatch(latchCount); List result = new ArrayList<>(toUpload.size()); LatchedActionListener latchedActionListener = new LatchedActionListener<>( @@ -467,6 +482,8 @@ private List writeIndexMetadataParallel(ClusterState clus writeIndexMetadataAsync(clusterState, indexMetadata, latchedActionListener); } + invokeIndexMetadataUploadListeners(newIndexMetadataList, latch, exceptionList); + try { if (latch.await(getIndexMetadataUploadTimeout().millis(), TimeUnit.MILLISECONDS) == false) { RemoteStateTransferException ex = new RemoteStateTransferException( @@ -506,11 +523,63 @@ private List writeIndexMetadataParallel(ClusterState clus return result; } + /** + * Invokes the index metadata upload listener but does not wait for the execution to complete. + */ + private void invokeIndexMetadataUploadListeners( + List newIndexMetadataList, + CountDownLatch latch, + List exceptionList + ) { + for (IndexMetadataUploadListener listener : indexMetadataUploadListeners) { + String listenerName = listener.getClass().getSimpleName(); + listener.onNewIndexUpload( + newIndexMetadataList, + getIndexMetadataUploadActionListener(newIndexMetadataList, latch, exceptionList, listenerName) + ); + } + + } + + private ActionListener getIndexMetadataUploadActionListener( + List newIndexMetadataList, + CountDownLatch latch, + List exceptionList, + String listenerName + ) { + long startTime = System.nanoTime(); + return new LatchedActionListener<>( + ActionListener.wrap( + ignored -> logger.trace( + new ParameterizedMessage( + "{} : Invoked listener={} successfully tookTimeNs={}", + listenerName, + newIndexMetadataList, + (System.nanoTime() - startTime) + ) + ), + ex -> { + logger.error( + new ParameterizedMessage( + "{} : Exception during invocation of listener={} tookTimeNs={}", + listenerName, + newIndexMetadataList, + (System.nanoTime() - startTime) + ), + ex + ); + exceptionList.add(ex); + } + ), + latch + ); + } + /** * Allows async Upload of IndexMetadata to remote * - * @param clusterState current ClusterState - * @param indexMetadata {@link IndexMetadata} to upload + * @param clusterState current ClusterState + * @param indexMetadata {@link IndexMetadata} to upload * @param latchedActionListener listener to respond back on after upload finishes */ private void writeIndexMetadataAsync( @@ -659,16 +728,6 @@ private void writeMetadataManifest(String clusterName, String clusterUUID, Clust ); } - private String fetchPreviousClusterUUID(String clusterName, String clusterUUID) { - final Optional latestManifest = getLatestClusterMetadataManifest(clusterName, clusterUUID); - if (!latestManifest.isPresent()) { - final 
String previousClusterUUID = getLastKnownUUIDFromRemote(clusterName); - assert !clusterUUID.equals(previousClusterUUID) : "Last cluster UUID is same current cluster UUID"; - return previousClusterUUID; - } - return latestManifest.get().getPreviousClusterUUID(); - } - private BlobContainer indexMetadataContainer(String clusterName, String clusterUUID, String indexUUID) { // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/index/ftqsCnn9TgOX return blobStoreRepository.blobStore() @@ -737,7 +796,7 @@ static String getManifestFileName(long term, long version, boolean committed) { (committed ? "C" : "P"), // C for committed and P for published RemoteStoreUtils.invertLong(System.currentTimeMillis()), String.valueOf(MANIFEST_CURRENT_CODEC_VERSION) // Keep the codec version at last place only, during read we reads last place to - // determine codec version. + // determine codec version. ); } @@ -750,7 +809,7 @@ static String indexMetadataFileName(IndexMetadata indexMetadata) { RemoteStoreUtils.invertLong(indexMetadata.getVersion()), RemoteStoreUtils.invertLong(System.currentTimeMillis()), String.valueOf(INDEX_METADATA_CURRENT_CODEC_VERSION) // Keep the codec version at last place only, during read we reads last - // place to determine codec version. + // place to determine codec version. ); } @@ -772,8 +831,8 @@ private BlobPath getManifestFolderPath(String clusterName, String clusterUUID) { /** * Fetch latest index metadata from remote cluster state * - * @param clusterUUID uuid of cluster state to refer to in remote - * @param clusterName name of the cluster + * @param clusterUUID uuid of cluster state to refer to in remote + * @param clusterName name of the cluster * @param clusterMetadataManifest manifest file of cluster * @return {@code Map} latest IndexUUID to IndexMetadata map */ @@ -795,8 +854,8 @@ private Map getIndexMetadataMap( /** * Fetch index metadata from remote cluster state * - * @param clusterUUID uuid of cluster state to refer to in remote - * @param clusterName name of the cluster + * @param clusterUUID uuid of cluster state to refer to in remote + * @param clusterName name of the cluster * @param uploadedIndexMetadata {@link UploadedIndexMetadata} contains details about remote location of index metadata * @return {@link IndexMetadata} */ @@ -825,7 +884,6 @@ private IndexMetadata getIndexMetadata(String clusterName, String clusterUUID, U * @return {@link IndexMetadata} */ public ClusterState getLatestClusterState(String clusterName, String clusterUUID) { - start(); Optional clusterMetadataManifest = getLatestClusterMetadataManifest(clusterName, clusterUUID); if (clusterMetadataManifest.isEmpty()) { throw new IllegalStateException( @@ -989,6 +1047,7 @@ private List createClusterChain(final Map trimClusterUUIDs( @@ -1050,7 +1109,7 @@ private boolean isValidClusterUUID(ClusterMetadataManifest manifest) { * * @param clusterUUID uuid of cluster state to refer to in remote * @param clusterName name of the cluster - * @param limit max no of files to fetch + * @param limit max no of files to fetch * @return all manifest file names */ private List getManifestFileNames(String clusterName, String clusterUUID, int limit) throws IllegalStateException { @@ -1123,7 +1182,7 @@ private int getManifestCodecVersion(String fileName) { if (splitName.length == SPLITED_MANIFEST_FILE_LENGTH) { return Integer.parseInt(splitName[splitName.length - 1]); // Last value would be codec version. } else if (splitName.length < SPLITED_MANIFEST_FILE_LENGTH) { // Where codec is not part of file name, i.e. 
default codec version 0 - // is used. + // is used. return ClusterMetadataManifest.CODEC_V0; } else { throw new IllegalArgumentException("Manifest file name is corrupted"); @@ -1141,7 +1200,7 @@ public void writeMetadataFailed() { /** * Exception for Remote state transfer. */ - static class RemoteStateTransferException extends RuntimeException { + public static class RemoteStateTransferException extends RuntimeException { public RemoteStateTransferException(String errorDesc) { super(errorDesc); @@ -1155,7 +1214,7 @@ public RemoteStateTransferException(String errorDesc, Throwable cause) { /** * Purges all remote cluster state against provided cluster UUIDs * - * @param clusterName name of the cluster + * @param clusterName name of the cluster * @param clusterUUIDs clusteUUIDs for which the remote state needs to be purged */ void deleteStaleUUIDsClusterMetadata(String clusterName, List clusterUUIDs) { @@ -1188,8 +1247,8 @@ public void onFailure(Exception e) { /** * Deletes older than last {@code versionsToRetain} manifests. Also cleans up unreferenced IndexMetadata associated with older manifests * - * @param clusterName name of the cluster - * @param clusterUUID uuid of cluster state to refer to in remote + * @param clusterName name of the cluster + * @param clusterUUID uuid of cluster state to refer to in remote * @param manifestsToRetain no of latest manifest files to keep in remote */ // package private for testing @@ -1308,7 +1367,8 @@ private void deleteStalePaths(String clusterName, String clusterUUID, List> TRANSLOG_PATH = Map.of(TRANSLOG, List.of(DATA, METADATA)); + public static final Map> SEGMENT_PATH = Map.of(SEGMENTS, List.of(DataType.values())); + public static final Map> COMBINED_PATH; + + static { + Map> combinedPath = new HashMap<>(); + combinedPath.putAll(TRANSLOG_PATH); + combinedPath.putAll(SEGMENT_PATH); + COMBINED_PATH = Collections.unmodifiableMap(combinedPath); + } + private static final String DEFAULT_VERSION = "1"; + public static final String DIR = "remote-index-path"; + public static final String FILE_NAME_FORMAT = "remote_path_%s"; + static final String KEY_VERSION = "version"; + static final String KEY_INDEX_UUID = "index_uuid"; + static final String KEY_SHARD_COUNT = "shard_count"; + static final String KEY_PATH_CREATION_MAP = "path_creation_map"; + static final String KEY_PATHS = "paths"; + private final String indexUUID; + private final int shardCount; + private final Iterable basePath; + private final PathType pathType; + private final PathHashAlgorithm pathHashAlgorithm; + + /** + * This keeps the map of paths that would be present in the content of the index path file. For eg - It is possible + * that segment and translog repository can be different. For this use case, we have either segment or translog as the + * key, and list of data, metadata, and lock_files (only for segment) as the value. 
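+     * (A hedged example of the shape, using the constants above and assuming DataType.values()
+     * is {DATA, METADATA, LOCK_FILES}: TRANSLOG_PATH maps TRANSLOG to [DATA, METADATA],
+     * SEGMENT_PATH maps SEGMENTS to all data types, and COMBINED_PATH is their union, used when
+     * the translog and segment repositories are the same.)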
+ */ + private final Map> pathCreationMap; + + public RemoteIndexPath( + String indexUUID, + int shardCount, + Iterable basePath, + PathType pathType, + PathHashAlgorithm pathHashAlgorithm, + Map> pathCreationMap + ) { + if (Objects.isNull(pathCreationMap) + || Objects.isNull(pathType) + || isCompatible(pathType, pathHashAlgorithm) == false + || shardCount < 1 + || Objects.isNull(basePath) + || pathCreationMap.isEmpty() + || pathCreationMap.keySet().stream().anyMatch(k -> pathCreationMap.get(k).isEmpty())) { + ParameterizedMessage parameterizedMessage = new ParameterizedMessage( + "Invalid input in RemoteIndexPath constructor indexUUID={} shardCount={} basePath={} pathType={}" + + " pathHashAlgorithm={} pathCreationMap={}", + indexUUID, + shardCount, + basePath, + pathType, + pathHashAlgorithm, + pathCreationMap + ); + throw new IllegalArgumentException(parameterizedMessage.getFormattedMessage()); + } + boolean validMap = pathCreationMap.keySet() + .stream() + .allMatch(k -> pathCreationMap.get(k).stream().allMatch(k::isSupportedDataType)); + if (validMap == false) { + throw new IllegalArgumentException( + new ParameterizedMessage("pathCreationMap={} is having illegal combination of category and type", pathCreationMap) + .getFormattedMessage() + ); + } + this.indexUUID = indexUUID; + this.shardCount = shardCount; + this.basePath = basePath; + this.pathType = pathType; + this.pathHashAlgorithm = pathHashAlgorithm; + this.pathCreationMap = pathCreationMap; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(KEY_VERSION, DEFAULT_VERSION); + builder.field(KEY_INDEX_UUID, indexUUID); + builder.field(KEY_SHARD_COUNT, shardCount); + builder.field(PathType.NAME, pathType.name()); + if (Objects.nonNull(pathHashAlgorithm)) { + builder.field(PathHashAlgorithm.NAME, pathHashAlgorithm.name()); + } + + Map> pathMap = new HashMap<>(); + for (Map.Entry> entry : pathCreationMap.entrySet()) { + pathMap.put(entry.getKey().getName(), entry.getValue().stream().map(DataType::getName).collect(Collectors.toList())); + } + builder.field(KEY_PATH_CREATION_MAP); + builder.map(pathMap); + builder.startArray(KEY_PATHS); + for (Map.Entry> entry : pathCreationMap.entrySet()) { + DataCategory dataCategory = entry.getKey(); + for (DataType type : entry.getValue()) { + for (int shardNo = 0; shardNo < shardCount; shardNo++) { + PathInput pathInput = PathInput.builder() + .basePath(new BlobPath().add(basePath)) + .indexUUID(indexUUID) + .shardId(Integer.toString(shardNo)) + .dataCategory(dataCategory) + .dataType(type) + .build(); + builder.value(pathType.path(pathInput, pathHashAlgorithm).buildAsString()); + } + } + } + builder.endArray(); + return builder; + } + + public static RemoteIndexPath fromXContent(XContentParser ignored) { + throw new UnsupportedOperationException("RemoteIndexPath.fromXContent() is not supported"); + } +} diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteIndexPathUploader.java b/server/src/main/java/org/opensearch/index/remote/RemoteIndexPathUploader.java new file mode 100644 index 0000000000000..1ac7e41014d23 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/remote/RemoteIndexPathUploader.java @@ -0,0 +1,281 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.remote; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.action.LatchedActionListener; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.index.Index; +import org.opensearch.gateway.remote.IndexMetadataUploadListener; +import org.opensearch.gateway.remote.RemoteClusterStateService.RemoteStateTransferException; +import org.opensearch.node.Node; +import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.Repository; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.repositories.blobstore.ConfigBlobStoreFormat; +import org.opensearch.threadpool.ThreadPool; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.function.Supplier; +import java.util.stream.Collectors; + +import static org.opensearch.gateway.remote.RemoteClusterStateService.INDEX_METADATA_UPLOAD_TIMEOUT_SETTING; +import static org.opensearch.index.remote.RemoteIndexPath.COMBINED_PATH; +import static org.opensearch.index.remote.RemoteIndexPath.SEGMENT_PATH; +import static org.opensearch.index.remote.RemoteIndexPath.TRANSLOG_PATH; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteDataAttributePresent; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteStoreClusterStateEnabled; + +/** + * Uploads the remote store path for all possible combinations of {@link org.opensearch.index.remote.RemoteStoreEnums.DataCategory} + * and {@link org.opensearch.index.remote.RemoteStoreEnums.DataType} for each shard of an index. 
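+ * (A hedged example based on the constants in this diff: an index with UUID "abc123" gets a
+ * file named "remote_path_abc123" under the repository's "remote-index-path" directory, one
+ * upload per distinct translog/segment repository.)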
+ * + * @opensearch.internal + */ +@ExperimentalApi +public class RemoteIndexPathUploader extends IndexMetadataUploadListener { + + public static final ConfigBlobStoreFormat REMOTE_INDEX_PATH_FORMAT = new ConfigBlobStoreFormat<>( + RemoteIndexPath.FILE_NAME_FORMAT + ); + + private static final String TIMEOUT_EXCEPTION_MSG = "Timed out waiting while uploading remote index path file for indexes=%s"; + private static final String UPLOAD_EXCEPTION_MSG = "Exception occurred while uploading remote index paths for indexes=%s"; + static final String TRANSLOG_REPO_NAME_KEY = Node.NODE_ATTRIBUTES.getKey() + + RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; + static final String SEGMENT_REPO_NAME_KEY = Node.NODE_ATTRIBUTES.getKey() + + RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; + + private static final Logger logger = LogManager.getLogger(RemoteIndexPathUploader.class); + + private final Settings settings; + private final boolean isRemoteDataAttributePresent; + private final boolean isTranslogSegmentRepoSame; + private final Supplier repositoriesService; + private volatile TimeValue indexMetadataUploadTimeout; + + private BlobStoreRepository translogRepository; + private BlobStoreRepository segmentRepository; + + public RemoteIndexPathUploader( + ThreadPool threadPool, + Settings settings, + Supplier repositoriesService, + ClusterSettings clusterSettings + ) { + super(threadPool, ThreadPool.Names.GENERIC); + this.settings = Objects.requireNonNull(settings); + this.repositoriesService = Objects.requireNonNull(repositoriesService); + isRemoteDataAttributePresent = isRemoteDataAttributePresent(settings); + // If the remote data attributes are not present, then there is no effect of translog and segment being same or different or null. + isTranslogSegmentRepoSame = isTranslogSegmentRepoSame(); + Objects.requireNonNull(clusterSettings); + indexMetadataUploadTimeout = clusterSettings.get(INDEX_METADATA_UPLOAD_TIMEOUT_SETTING); + clusterSettings.addSettingsUpdateConsumer(INDEX_METADATA_UPLOAD_TIMEOUT_SETTING, this::setIndexMetadataUploadTimeout); + } + + @Override + protected void doOnNewIndexUpload(List indexMetadataList, ActionListener actionListener) { + if (isRemoteDataAttributePresent == false) { + logger.trace("Skipping beforeNewIndexUpload as there are no remote indexes"); + actionListener.onResponse(null); + return; + } + + long startTime = System.nanoTime(); + boolean success = false; + List eligibleList = indexMetadataList.stream().filter(this::requiresPathUpload).collect(Collectors.toList()); + String indexNames = eligibleList.stream().map(IndexMetadata::getIndex).map(Index::toString).collect(Collectors.joining(",")); + int latchCount = eligibleList.size() * (isTranslogSegmentRepoSame ? 
+        CountDownLatch latch = new CountDownLatch(latchCount);
+        List<Exception> exceptionList = Collections.synchronizedList(new ArrayList<>(latchCount));
+        try {
+            for (IndexMetadata indexMetadata : eligibleList) {
+                writeIndexPathAsync(indexMetadata, latch, exceptionList);
+            }
+
+            logger.trace(new ParameterizedMessage("Remote index path upload started for {}", indexNames));
+
+            try {
+                if (latch.await(indexMetadataUploadTimeout.millis(), TimeUnit.MILLISECONDS) == false) {
+                    RemoteStateTransferException ex = new RemoteStateTransferException(
+                        String.format(Locale.ROOT, TIMEOUT_EXCEPTION_MSG, indexNames)
+                    );
+                    exceptionList.forEach(ex::addSuppressed);
+                    actionListener.onFailure(ex);
+                    return;
+                }
+            } catch (InterruptedException exception) {
+                exceptionList.forEach(exception::addSuppressed);
+                RemoteStateTransferException ex = new RemoteStateTransferException(
+                    String.format(Locale.ROOT, TIMEOUT_EXCEPTION_MSG, indexNames),
+                    exception
+                );
+                actionListener.onFailure(ex);
+                return;
+            }
+            if (exceptionList.size() > 0) {
+                RemoteStateTransferException ex = new RemoteStateTransferException(
+                    String.format(Locale.ROOT, UPLOAD_EXCEPTION_MSG, indexNames)
+                );
+                exceptionList.forEach(ex::addSuppressed);
+                actionListener.onFailure(ex);
+                return;
+            }
+            success = true;
+            actionListener.onResponse(null);
+        } catch (Exception exception) {
+            RemoteStateTransferException ex = new RemoteStateTransferException(
+                String.format(Locale.ROOT, UPLOAD_EXCEPTION_MSG, indexNames),
+                exception
+            );
+            exceptionList.forEach(ex::addSuppressed);
+            actionListener.onFailure(ex);
+        } finally {
+            long tookTimeNs = System.nanoTime() - startTime;
+            logger.trace(new ParameterizedMessage("executed beforeNewIndexUpload status={} tookTimeNs={}", success, tookTimeNs));
+        }
+
+    }
+
+    private void writeIndexPathAsync(IndexMetadata idxMD, CountDownLatch latch, List<Exception> exceptionList) {
+        if (isTranslogSegmentRepoSame) {
+            // If the repositories are the same, we upload a single file containing paths for both translog and segments.
+            writePathToRemoteStore(idxMD, translogRepository, latch, exceptionList, COMBINED_PATH);
+        } else {
+            // If the repositories are different, we upload one file each for translog and segments, containing their
+            // individual paths.
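+            // Each writePathToRemoteStore call below counts the shared latch down exactly once via its latched
+            // listener, matching the latchCount of two per index computed in doOnNewIndexUpload.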
+            writePathToRemoteStore(idxMD, translogRepository, latch, exceptionList, TRANSLOG_PATH);
+            writePathToRemoteStore(idxMD, segmentRepository, latch, exceptionList, SEGMENT_PATH);
+        }
+    }
+
+    private void writePathToRemoteStore(
+        IndexMetadata idxMD,
+        BlobStoreRepository repository,
+        CountDownLatch latch,
+        List<Exception> exceptionList,
+        Map<RemoteStoreEnums.DataCategory, List<RemoteStoreEnums.DataType>> pathCreationMap
+    ) {
+        Map<String, String> remoteCustomData = idxMD.getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY);
+        RemoteStoreEnums.PathType pathType = RemoteStoreEnums.PathType.valueOf(remoteCustomData.get(RemoteStoreEnums.PathType.NAME));
+        RemoteStoreEnums.PathHashAlgorithm hashAlgorithm = RemoteStoreEnums.PathHashAlgorithm.valueOf(
+            remoteCustomData.get(RemoteStoreEnums.PathHashAlgorithm.NAME)
+        );
+        String indexUUID = idxMD.getIndexUUID();
+        int shardCount = idxMD.getNumberOfShards();
+        BlobPath basePath = repository.basePath();
+        BlobContainer blobContainer = repository.blobStore().blobContainer(basePath.add(RemoteIndexPath.DIR));
+        ActionListener<Void> actionListener = getUploadPathLatchedActionListener(idxMD, latch, exceptionList, pathCreationMap);
+        try {
+            REMOTE_INDEX_PATH_FORMAT.writeAsyncWithUrgentPriority(
+                new RemoteIndexPath(indexUUID, shardCount, basePath, pathType, hashAlgorithm, pathCreationMap),
+                blobContainer,
+                indexUUID,
+                actionListener
+            );
+        } catch (IOException ioException) {
+            RemoteStateTransferException ex = new RemoteStateTransferException(
+                String.format(Locale.ROOT, UPLOAD_EXCEPTION_MSG, List.of(idxMD.getIndex().getName())),
+                ioException
+            );
+            actionListener.onFailure(ex);
+        }
+    }
+
+    private Repository validateAndGetRepository(String repoSetting) {
+        final String repo = settings.get(repoSetting);
+        assert repo != null : "Remote " + repoSetting + " repository is not configured";
+        final Repository repository = repositoriesService.get().repository(repo);
+        assert repository instanceof BlobStoreRepository : "Repository should be instance of BlobStoreRepository";
+        return repository;
+    }
+
+    public void start() {
+        assert isRemoteStoreClusterStateEnabled(settings) == true : "Remote cluster state is not enabled";
+        if (isRemoteDataAttributePresent == false) {
+            // If remote store data attributes are not present, then we skip this.
+            return;
+        }
+        translogRepository = (BlobStoreRepository) validateAndGetRepository(TRANSLOG_REPO_NAME_KEY);
+        segmentRepository = (BlobStoreRepository) validateAndGetRepository(SEGMENT_REPO_NAME_KEY);
+    }
+
+    private boolean isTranslogSegmentRepoSame() {
+        String translogRepoName = settings.get(TRANSLOG_REPO_NAME_KEY);
+        String segmentRepoName = settings.get(SEGMENT_REPO_NAME_KEY);
+        return Objects.equals(translogRepoName, segmentRepoName);
+    }
+
+    private LatchedActionListener<Void> getUploadPathLatchedActionListener(
+        IndexMetadata indexMetadata,
+        CountDownLatch latch,
+        List<Exception> exceptionList,
+        Map<RemoteStoreEnums.DataCategory, List<RemoteStoreEnums.DataType>> pathCreationMap
+    ) {
+        return new LatchedActionListener<>(
+            ActionListener.wrap(
+                resp -> logger.trace(
+                    new ParameterizedMessage("Index path uploaded for {} indexMetadata={}", pathCreationMap, indexMetadata)
+                ),
+                ex -> {
+                    logger.error(
+                        new ParameterizedMessage(
+                            "Exception during Index path upload for {} indexMetadata={}",
+                            pathCreationMap,
+                            indexMetadata
+                        ),
+                        ex
+                    );
+                    exceptionList.add(ex);
+                }
+            ),
+            latch
+        );
+    }
+
+    /**
+     * This method checks if the index metadata has attributes that call for uploading the index path for remote store
+     * uploads. It checks if the remote store path type is {@code HASHED_PREFIX} and returns true if so.
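+     * Indexes without remote custom data, or with any other path type, are skipped.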
+ */ + private boolean requiresPathUpload(IndexMetadata indexMetadata) { + // A cluster will have remote custom metadata only if the cluster is remote store enabled from data side. + Map remoteCustomData = indexMetadata.getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY); + if (Objects.isNull(remoteCustomData) || remoteCustomData.isEmpty()) { + return false; + } + String pathTypeStr = remoteCustomData.get(RemoteStoreEnums.PathType.NAME); + if (Objects.isNull(pathTypeStr)) { + return false; + } + // We need to upload the path only if the path type for an index is hashed_prefix + return RemoteStoreEnums.PathType.HASHED_PREFIX == RemoteStoreEnums.PathType.parseString(pathTypeStr); + } + + private void setIndexMetadataUploadTimeout(TimeValue newIndexMetadataUploadTimeout) { + this.indexMetadataUploadTimeout = newIndexMetadataUploadTimeout; + } +} diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategy.java b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategy.java index 775f8fe19e4ef..c58f6c3faac84 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategy.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategy.java @@ -10,6 +10,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.index.remote.RemoteStoreEnums.DataCategory; @@ -25,6 +26,7 @@ * @opensearch.internal */ @PublicApi(since = "2.14.0") +@ExperimentalApi public class RemoteStorePathStrategy { private final PathType type; @@ -74,6 +76,7 @@ public BlobPath generatePath(PathInput pathInput) { * @opensearch.internal */ @PublicApi(since = "2.14.0") + @ExperimentalApi public static class PathInput { private final BlobPath basePath; private final String indexUUID; @@ -122,6 +125,7 @@ public static Builder builder() { * @opensearch.internal */ @PublicApi(since = "2.14.0") + @ExperimentalApi public static class Builder { private BlobPath basePath; private String indexUUID; diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java index a33f7522daaae..5cd69dfa679a5 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java @@ -9,6 +9,7 @@ package org.opensearch.index.remote; import org.opensearch.Version; +import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; import org.opensearch.index.remote.RemoteStoreEnums.PathType; import org.opensearch.indices.RemoteStoreSettings; @@ -20,6 +21,7 @@ * * @opensearch.internal */ +@ExperimentalApi public class RemoteStorePathStrategyResolver { private final RemoteStoreSettings remoteStoreSettings; diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index a33fd71e21896..47f128af438a6 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -146,6 +146,7 @@ import org.opensearch.index.analysis.AnalysisRegistry; import org.opensearch.index.engine.EngineFactory; import 
org.opensearch.index.recovery.RemoteStoreRestoreService; +import org.opensearch.index.remote.RemoteIndexPathUploader; import org.opensearch.index.remote.RemoteStoreStatsTrackerFactory; import org.opensearch.index.store.RemoteSegmentStoreDirectoryFactory; import org.opensearch.index.store.remote.filecache.FileCache; @@ -726,17 +727,26 @@ protected Node( threadPool::relativeTimeInMillis ); final RemoteClusterStateService remoteClusterStateService; + final RemoteIndexPathUploader remoteIndexPathUploader; if (isRemoteStoreClusterStateEnabled(settings)) { + remoteIndexPathUploader = new RemoteIndexPathUploader( + threadPool, + settings, + repositoriesServiceReference::get, + clusterService.getClusterSettings() + ); remoteClusterStateService = new RemoteClusterStateService( nodeEnvironment.nodeId(), repositoriesServiceReference::get, settings, clusterService.getClusterSettings(), threadPool::preciseRelativeTimeInNanos, - threadPool + threadPool, + List.of(remoteIndexPathUploader) ); } else { remoteClusterStateService = null; + remoteIndexPathUploader = null; } // collect engine factory providers from plugins @@ -1313,6 +1323,7 @@ protected Node( b.bind(SearchRequestSlowLog.class).toInstance(searchRequestSlowLog); b.bind(MetricsRegistry.class).toInstance(metricsRegistry); b.bind(RemoteClusterStateService.class).toProvider(() -> remoteClusterStateService); + b.bind(RemoteIndexPathUploader.class).toProvider(() -> remoteIndexPathUploader); b.bind(PersistedStateRegistry.class).toInstance(persistedStateRegistry); b.bind(SegmentReplicationStatsTracker.class).toInstance(segmentReplicationStatsTracker); b.bind(SearchRequestOperationsCompositeListenerFactory.class).toInstance(searchRequestOperationsCompositeListenerFactory); @@ -1462,6 +1473,10 @@ public Node start() throws NodeValidationException { if (remoteClusterStateService != null) { remoteClusterStateService.start(); } + final RemoteIndexPathUploader remoteIndexPathUploader = injector.getInstance(RemoteIndexPathUploader.class); + if (remoteIndexPathUploader != null) { + remoteIndexPathUploader.start(); + } // Load (and maybe upgrade) the metadata stored on disk final GatewayMetaState gatewayMetaState = injector.getInstance(GatewayMetaState.class); gatewayMetaState.start( diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/BaseBlobStoreFormat.java b/server/src/main/java/org/opensearch/repositories/blobstore/BaseBlobStoreFormat.java new file mode 100644 index 0000000000000..262a32fa1e74d --- /dev/null +++ b/server/src/main/java/org/opensearch/repositories/blobstore/BaseBlobStoreFormat.java @@ -0,0 +1,130 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.repositories.blobstore;
+
+import org.apache.lucene.codecs.CodecUtil;
+import org.apache.lucene.store.OutputStreamIndexOutput;
+import org.opensearch.common.blobstore.BlobContainer;
+import org.opensearch.common.io.stream.BytesStreamOutput;
+import org.opensearch.common.lucene.store.IndexOutputOutputStream;
+import org.opensearch.common.xcontent.XContentType;
+import org.opensearch.core.common.bytes.BytesReference;
+import org.opensearch.core.compress.Compressor;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
+import org.opensearch.core.xcontent.ToXContent;
+import org.opensearch.core.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.Locale;
+import java.util.Objects;
+
+/**
+ * Provides common methods, variables that can be used by the implementors.
+ *
+ * @opensearch.internal
+ */
+public class BaseBlobStoreFormat<T extends ToXContent> {
+
+    private static final int BUFFER_SIZE = 4096;
+
+    private final String blobNameFormat;
+
+    private final boolean skipHeaderFooter;
+
+    /**
+     * @param blobNameFormat format of the blobname in {@link String#format} format
+     */
+    public BaseBlobStoreFormat(String blobNameFormat, boolean skipHeaderFooter) {
+        this.blobNameFormat = blobNameFormat;
+        this.skipHeaderFooter = skipHeaderFooter;
+    }
+
+    protected String blobName(String name) {
+        return String.format(Locale.ROOT, blobNameFormat, name);
+    }
+
+    /**
+     * Writes blob with resolving the blob name using {@link #blobName} method.
+     * <p>
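+     * Subclasses control, via the {@code skipHeaderFooter} flag, whether the payload is wrapped in a Lucene codec
+     * header and footer.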
+ * The blob will optionally by compressed. + * + * @param obj object to be serialized + * @param blobContainer blob container + * @param name blob name + * @param compressor whether to use compression + * @param params ToXContent params + * @param codec codec used + * @param version version used + */ + protected void write( + final T obj, + final BlobContainer blobContainer, + final String name, + final Compressor compressor, + final ToXContent.Params params, + XContentType xContentType, + String codec, + Integer version + ) throws IOException { + final String blobName = blobName(name); + final BytesReference bytes = serialize(obj, blobName, compressor, params, xContentType, codec, version); + blobContainer.writeBlob(blobName, bytes.streamInput(), bytes.length(), false); + } + + public BytesReference serialize( + final T obj, + final String blobName, + final Compressor compressor, + final ToXContent.Params params, + XContentType xContentType, + String codec, + Integer version + ) throws IOException { + assert skipHeaderFooter || (Objects.nonNull(codec) && Objects.nonNull(version)); + try (BytesStreamOutput outputStream = new BytesStreamOutput()) { + try ( + OutputStreamIndexOutput indexOutput = new OutputStreamIndexOutput( + "ChecksumBlobStoreFormat.writeBlob(blob=\"" + blobName + "\")", + blobName, + outputStream, + BUFFER_SIZE + ) + ) { + if (skipHeaderFooter == false) { + CodecUtil.writeHeader(indexOutput, codec, version); + } + try (OutputStream indexOutputOutputStream = new IndexOutputOutputStream(indexOutput) { + @Override + public void close() { + // this is important since some of the XContentBuilders write bytes on close. + // in order to write the footer we need to prevent closing the actual index input. + } + }; + XContentBuilder builder = MediaTypeRegistry.contentBuilder( + xContentType, + compressor.threadLocalOutputStream(indexOutputOutputStream) + ) + ) { + builder.startObject(); + obj.toXContent(builder, params); + builder.endObject(); + } + if (skipHeaderFooter == false) { + CodecUtil.writeFooter(indexOutput); + } + } + return outputStream.bytes(); + } + } + + protected String getBlobNameFormat() { + return blobNameFormat; + } +} diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/ChecksumBlobStoreFormat.java b/server/src/main/java/org/opensearch/repositories/blobstore/ChecksumBlobStoreFormat.java index 3e6052a5ef820..e567e1b626c5a 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/ChecksumBlobStoreFormat.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/ChecksumBlobStoreFormat.java @@ -38,7 +38,6 @@ import org.apache.lucene.store.ByteBuffersDataInput; import org.apache.lucene.store.ByteBuffersIndexInput; import org.apache.lucene.store.IndexInput; -import org.apache.lucene.store.OutputStreamIndexOutput; import org.apache.lucene.util.BytesRef; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.common.CheckedFunction; @@ -48,26 +47,21 @@ import org.opensearch.common.blobstore.transfer.RemoteTransferContainer; import org.opensearch.common.blobstore.transfer.stream.OffsetRangeIndexInputStream; import org.opensearch.common.io.Streams; -import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.lucene.store.ByteArrayIndexInput; -import org.opensearch.common.lucene.store.IndexOutputOutputStream; import org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.common.xcontent.XContentType; import 
org.opensearch.core.action.ActionListener; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.compress.Compressor; -import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.ToXContent; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.gateway.CorruptStateException; import org.opensearch.index.store.exception.ChecksumCombinationException; import org.opensearch.snapshots.SnapshotInfo; import java.io.IOException; -import java.io.OutputStream; import java.util.Arrays; import java.util.HashMap; import java.util.Locale; @@ -80,7 +74,7 @@ * * @opensearch.internal */ -public final class ChecksumBlobStoreFormat { +public final class ChecksumBlobStoreFormat extends BaseBlobStoreFormat { // Serialization parameters to specify correct context for metadata serialization public static final ToXContent.Params SNAPSHOT_ONLY_FORMAT_PARAMS; @@ -98,12 +92,8 @@ public final class ChecksumBlobStoreFormat { // The format version public static final int VERSION = 1; - private static final int BUFFER_SIZE = 4096; - private final String codec; - private final String blobNameFormat; - private final CheckedFunction reader; /** @@ -112,8 +102,8 @@ public final class ChecksumBlobStoreFormat { * @param reader prototype object that can deserialize T from XContent */ public ChecksumBlobStoreFormat(String codec, String blobNameFormat, CheckedFunction reader) { + super(blobNameFormat, false); this.reader = reader; - this.blobNameFormat = blobNameFormat; this.codec = codec; } @@ -130,7 +120,7 @@ public T read(BlobContainer blobContainer, String name, NamedXContentRegistry na } public String blobName(String name) { - return String.format(Locale.ROOT, blobNameFormat, name); + return String.format(Locale.ROOT, getBlobNameFormat(), name); } public T deserialize(String blobName, NamedXContentRegistry namedXContentRegistry, BytesReference bytes) throws IOException { @@ -170,30 +160,7 @@ public T deserialize(String blobName, NamedXContentRegistry namedXContentRegistr * @param compressor whether to use compression */ public void write(final T obj, final BlobContainer blobContainer, final String name, final Compressor compressor) throws IOException { - write(obj, blobContainer, name, compressor, SNAPSHOT_ONLY_FORMAT_PARAMS); - } - - /** - * Writes blob with resolving the blob name using {@link #blobName} method. - *

- * The blob will optionally by compressed. - * - * @param obj object to be serialized - * @param blobContainer blob container - * @param name blob name - * @param compressor whether to use compression - * @param params ToXContent params - */ - public void write( - final T obj, - final BlobContainer blobContainer, - final String name, - final Compressor compressor, - final ToXContent.Params params - ) throws IOException { - final String blobName = blobName(name); - final BytesReference bytes = serialize(obj, blobName, compressor, params); - blobContainer.writeBlob(blobName, bytes.streamInput(), bytes.length(), false); + write(obj, blobContainer, name, compressor, SNAPSHOT_ONLY_FORMAT_PARAMS, XContentType.SMILE, codec, VERSION); } /** @@ -251,7 +218,7 @@ private void writeAsyncWithPriority( final ToXContent.Params params ) throws IOException { if (blobContainer instanceof AsyncMultiStreamBlobContainer == false) { - write(obj, blobContainer, name, compressor, params); + write(obj, blobContainer, name, compressor, params, XContentType.SMILE, codec, VERSION); listener.onResponse(null); return; } @@ -290,35 +257,6 @@ private void writeAsyncWithPriority( public BytesReference serialize(final T obj, final String blobName, final Compressor compressor, final ToXContent.Params params) throws IOException { - try (BytesStreamOutput outputStream = new BytesStreamOutput()) { - try ( - OutputStreamIndexOutput indexOutput = new OutputStreamIndexOutput( - "ChecksumBlobStoreFormat.writeBlob(blob=\"" + blobName + "\")", - blobName, - outputStream, - BUFFER_SIZE - ) - ) { - CodecUtil.writeHeader(indexOutput, codec, VERSION); - try (OutputStream indexOutputOutputStream = new IndexOutputOutputStream(indexOutput) { - @Override - public void close() throws IOException { - // this is important since some of the XContentBuilders write bytes on close. - // in order to write the footer we need to prevent closing the actual index input. - } - }; - XContentBuilder builder = MediaTypeRegistry.contentBuilder( - XContentType.SMILE, - compressor.threadLocalOutputStream(indexOutputOutputStream) - ) - ) { - builder.startObject(); - obj.toXContent(builder, params); - builder.endObject(); - } - CodecUtil.writeFooter(indexOutput); - } - return outputStream.bytes(); - } + return serialize(obj, blobName, compressor, params, XContentType.SMILE, codec, VERSION); } } diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/ConfigBlobStoreFormat.java b/server/src/main/java/org/opensearch/repositories/blobstore/ConfigBlobStoreFormat.java new file mode 100644 index 0000000000000..18c718ca2110e --- /dev/null +++ b/server/src/main/java/org/opensearch/repositories/blobstore/ConfigBlobStoreFormat.java @@ -0,0 +1,73 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.repositories.blobstore;
+
+import org.apache.lucene.store.IndexInput;
+import org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer;
+import org.opensearch.common.blobstore.BlobContainer;
+import org.opensearch.common.blobstore.stream.write.WritePriority;
+import org.opensearch.common.blobstore.transfer.RemoteTransferContainer;
+import org.opensearch.common.blobstore.transfer.stream.OffsetRangeIndexInputStream;
+import org.opensearch.common.lucene.store.ByteArrayIndexInput;
+import org.opensearch.common.xcontent.XContentType;
+import org.opensearch.core.action.ActionListener;
+import org.opensearch.core.common.bytes.BytesReference;
+import org.opensearch.core.compress.NoneCompressor;
+import org.opensearch.core.xcontent.ToXContent;
+
+import java.io.IOException;
+
+/**
+ * Format for writing short configurations to remote. A read interface does not exist yet, as it is not yet required.
+ * This format should be used for writing data from in-memory to the remote store when there is no need for a checksum
+ * and the remote store's client library has built-in checksum capabilities for both upload and download. This format
+ * serialises the data as JSON and stores it on the remote store as is. It does not support compression yet (this can
+ * be changed as required). In comparison to {@link ChecksumBlobStoreFormat}, this format does not add any additional
+ * metadata (like header and footer) to the content. Hence, this format does not depend on {@code CodecUtil} from the
+ * Lucene library.
+ *
+ * @opensearch.internal
+ */
+public class ConfigBlobStoreFormat<T extends ToXContent> extends BaseBlobStoreFormat<T> {
+
+    /**
+     * @param blobNameFormat format of the blobname in {@link String#format} format
+     */
+    public ConfigBlobStoreFormat(String blobNameFormat) {
+        super(blobNameFormat, true);
+    }
+
+    public void writeAsyncWithUrgentPriority(T obj, BlobContainer blobContainer, String name, ActionListener<Void> listener)
+        throws IOException {
+        if (blobContainer instanceof AsyncMultiStreamBlobContainer == false) {
+            write(obj, blobContainer, name, new NoneCompressor(), ToXContent.EMPTY_PARAMS, XContentType.JSON, null, null);
+            listener.onResponse(null);
+            return;
+        }
+        String blobName = blobName(name);
+        BytesReference bytes = serialize(obj, blobName, new NoneCompressor(), ToXContent.EMPTY_PARAMS, XContentType.JSON, null, null);
+        String resourceDescription = "BlobStoreFormat.writeAsyncWithPriority(blob=\"" + blobName + "\")";
+        try (IndexInput input = new ByteArrayIndexInput(resourceDescription, BytesReference.toBytes(bytes))) {
+            try (
+                RemoteTransferContainer remoteTransferContainer = new RemoteTransferContainer(
+                    blobName,
+                    blobName,
+                    bytes.length(),
+                    true,
+                    WritePriority.URGENT,
+                    (size, position) -> new OffsetRangeIndexInputStream(input, size, position),
+                    null,
+                    false
+                )
+            ) {
+                ((AsyncMultiStreamBlobContainer) blobContainer).asyncBlobUpload(remoteTransferContainer.createWriteContext(), listener);
+            }
+        }
+    }
+}
diff --git a/server/src/test/java/org/opensearch/common/blobstore/transfer/RemoteTransferContainerTests.java b/server/src/test/java/org/opensearch/common/blobstore/transfer/RemoteTransferContainerTests.java
index 074f659850c7b..44f6a0e11251c 100644
--- a/server/src/test/java/org/opensearch/common/blobstore/transfer/RemoteTransferContainerTests.java
+++ b/server/src/test/java/org/opensearch/common/blobstore/transfer/RemoteTransferContainerTests.java
@@ -67,7 +67,7 @@ public OffsetRangeInputStream get(long size, long position) throws IOException {
             return new
OffsetRangeFileInputStream(testFile, size, position); } }, - 0, + 0L, false ) ) { @@ -89,7 +89,7 @@ public OffsetRangeInputStream get(long size, long position) throws IOException { return new OffsetRangeFileInputStream(testFile, size, position); } }, - 0, + 0L, false ) ) { @@ -155,7 +155,7 @@ public OffsetRangeInputStream get(long size, long position) throws IOException { return new OffsetRangeFileInputStream(testFile, size, position); } }, - 0, + 0L, false ) ) { @@ -223,7 +223,7 @@ public OffsetRangeInputStream get(long size, long position) throws IOException { return new OffsetRangeFileInputStream(testFile, size, position); } }, - 0, + 0L, isRemoteDataIntegritySupported ) ) { @@ -286,7 +286,7 @@ public OffsetRangeInputStream get(long size, long position) throws IOException { return new RateLimitingOffsetRangeInputStream(offsetRangeIndexInputStream, rateLimiterSupplier, null); } }, - 0, + 0L, true ) ) { @@ -347,7 +347,7 @@ public OffsetRangeInputStream get(long size, long position) throws IOException { return new RateLimitingOffsetRangeInputStream(offsetRangeIndexInputStream, rateLimiterSupplier, null); } }, - 0, + 0L, true ) ) { diff --git a/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java b/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java index 74bae7b5eb7cf..3ba98c44f8d3e 100644 --- a/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java +++ b/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java @@ -71,6 +71,7 @@ import org.opensearch.gateway.remote.RemotePersistenceStats; import org.opensearch.index.recovery.RemoteStoreRestoreService; import org.opensearch.index.recovery.RemoteStoreRestoreService.RemoteRestoreResult; +import org.opensearch.index.remote.RemoteIndexPathUploader; import org.opensearch.node.Node; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.fs.FsRepository; @@ -473,20 +474,23 @@ public void testDataOnlyNodePersistence() throws Exception { ); Supplier remoteClusterStateServiceSupplier = () -> { if (isRemoteStoreClusterStateEnabled(settings)) { + Supplier repositoriesServiceSupplier = () -> new RepositoriesService( + settings, + clusterService, + transportService, + Collections.emptyMap(), + Collections.emptyMap(), + transportService.getThreadPool() + ); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); return new RemoteClusterStateService( nodeEnvironment.nodeId(), - () -> new RepositoriesService( - settings, - clusterService, - transportService, - Collections.emptyMap(), - Collections.emptyMap(), - transportService.getThreadPool() - ), + repositoriesServiceSupplier, settings, - new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + clusterSettings, () -> 0L, - threadPool + threadPool, + List.of(new RemoteIndexPathUploader(threadPool, settings, repositoriesServiceSupplier, clusterSettings)) ); } else { return null; diff --git a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java index 65477051cdb30..9f321cd62847c 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java @@ -38,6 +38,7 @@ import org.opensearch.core.index.Index; import 
org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedIndexMetadata; +import org.opensearch.index.remote.RemoteIndexPathUploader; import org.opensearch.index.remote.RemoteStoreUtils; import org.opensearch.indices.IndicesModule; import org.opensearch.repositories.FilterRepository; @@ -154,7 +155,8 @@ public void setup() { settings, clusterSettings, () -> 0L, - threadPool + threadPool, + List.of(new RemoteIndexPathUploader(threadPool, settings, repositoriesServiceSupplier, clusterSettings)) ); } @@ -173,15 +175,17 @@ public void testFailWriteFullMetadataNonClusterManagerNode() throws IOException public void testFailInitializationWhenRemoteStateDisabled() { final Settings settings = Settings.builder().build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); assertThrows( AssertionError.class, () -> new RemoteClusterStateService( "test-node-id", repositoriesServiceSupplier, settings, - new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + clusterSettings, () -> 0L, - threadPool + threadPool, + List.of(new RemoteIndexPathUploader(threadPool, settings, repositoriesServiceSupplier, clusterSettings)) ) ); } diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteIndexPathTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteIndexPathTests.java new file mode 100644 index 0000000000000..8ddbd383756e7 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/remote/RemoteIndexPathTests.java @@ -0,0 +1,140 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.remote; + +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; + +import org.mockito.Mockito; + +import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.TRANSLOG; +import static org.opensearch.index.remote.RemoteStoreEnums.DataType.LOCK_FILES; + +public class RemoteIndexPathTests extends OpenSearchTestCase { + + /** + * This checks that the remote path contains paths only for segment and data/metadata/lock_files combination. 
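+     * The expected JSON is deterministic because the index UUID, shard count, base path and the FNV_1A_BASE64 hash
+     * algorithm used for the prefixes are all fixed inputs.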
+ */ + public void testToXContentWithSegmentRepo() throws IOException { + RemoteIndexPath indexPath = new RemoteIndexPath( + "djjsid73he8yd7usduh", + 2, + new BlobPath().add("djsd878ndjh").add("hcs87cj8"), + PathType.HASHED_PREFIX, + PathHashAlgorithm.FNV_1A_BASE64, + RemoteIndexPath.SEGMENT_PATH + ); + XContentBuilder xContentBuilder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON); + xContentBuilder.startObject(); + xContentBuilder = indexPath.toXContent(xContentBuilder, ToXContent.EMPTY_PARAMS); + xContentBuilder.endObject(); + String expected = + "{\"version\":\"1\",\"index_uuid\":\"djjsid73he8yd7usduh\",\"shard_count\":2,\"path_type\":\"HASHED_PREFIX\",\"path_hash_algorithm\":\"FNV_1A_BASE64\",\"path_creation_map\":{\"segments\":[\"data\",\"metadata\",\"lock_files\"]},\"paths\":[\"9BmBinD5HYs/djsd878ndjh/hcs87cj8/djjsid73he8yd7usduh/0/segments/data/\",\"ExCNOD8_5ew/djsd878ndjh/hcs87cj8/djjsid73he8yd7usduh/1/segments/data/\",\"z8wtf0yr2l4/djsd878ndjh/hcs87cj8/djjsid73he8yd7usduh/0/segments/metadata/\",\"VheHVwFlExE/djsd878ndjh/hcs87cj8/djjsid73he8yd7usduh/1/segments/metadata/\",\"IgFKbsDeUpQ/djsd878ndjh/hcs87cj8/djjsid73he8yd7usduh/0/segments/lock_files/\",\"pA3gy_GZtns/djsd878ndjh/hcs87cj8/djjsid73he8yd7usduh/1/segments/lock_files/\"]}"; + assertEquals(expected, xContentBuilder.toString()); + } + + /** + * This checks that the remote path contains paths only for translog and data/metadata combination. + */ + public void testToXContentForTranslogRepoOnly() throws IOException { + RemoteIndexPath indexPath = new RemoteIndexPath( + "djjsid73he8yd7usduh", + 2, + new BlobPath().add("djsd878ndjh").add("hcs87cj8"), + PathType.HASHED_PREFIX, + PathHashAlgorithm.FNV_1A_BASE64, + RemoteIndexPath.TRANSLOG_PATH + ); + XContentBuilder xContentBuilder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON); + xContentBuilder.startObject(); + xContentBuilder = indexPath.toXContent(xContentBuilder, ToXContent.EMPTY_PARAMS); + xContentBuilder.endObject(); + String expected = + "{\"version\":\"1\",\"index_uuid\":\"djjsid73he8yd7usduh\",\"shard_count\":2,\"path_type\":\"HASHED_PREFIX\",\"path_hash_algorithm\":\"FNV_1A_BASE64\",\"path_creation_map\":{\"translog\":[\"data\",\"metadata\"]},\"paths\":[\"2EaVODaKBck/djsd878ndjh/hcs87cj8/djjsid73he8yd7usduh/0/translog/data/\",\"dTS2VqEOUNo/djsd878ndjh/hcs87cj8/djjsid73he8yd7usduh/1/translog/data/\",\"PVNKNGonmZw/djsd878ndjh/hcs87cj8/djjsid73he8yd7usduh/0/translog/metadata/\",\"NXmt0Y6NjA8/djsd878ndjh/hcs87cj8/djjsid73he8yd7usduh/1/translog/metadata/\"]}"; + assertEquals(expected, xContentBuilder.toString()); + } + + /** + * This checks that the remote path contains paths only for translog and data/metadata combination. 
+ */ + public void testToXContentForBothRepos() throws IOException { + Map> pathCreationMap = new TreeMap<>(); + pathCreationMap.putAll(RemoteIndexPath.TRANSLOG_PATH); + pathCreationMap.putAll(RemoteIndexPath.SEGMENT_PATH); + RemoteIndexPath indexPath = new RemoteIndexPath( + "csbdqiu8a7sdnjdks", + 3, + new BlobPath().add("nxf9yv0").add("c3ejoi"), + PathType.HASHED_PREFIX, + PathHashAlgorithm.FNV_1A_BASE64, + pathCreationMap + ); + XContentBuilder xContentBuilder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON); + xContentBuilder.startObject(); + xContentBuilder = indexPath.toXContent(xContentBuilder, ToXContent.EMPTY_PARAMS); + xContentBuilder.endObject(); + String expected = + "{\"version\":\"1\",\"index_uuid\":\"csbdqiu8a7sdnjdks\",\"shard_count\":3,\"path_type\":\"HASHED_PREFIX\",\"path_hash_algorithm\":\"FNV_1A_BASE64\",\"path_creation_map\":{\"translog\":[\"data\",\"metadata\"],\"segments\":[\"data\",\"metadata\",\"lock_files\"]},\"paths\":[\"Cjo0F6kNjYk/nxf9yv0/c3ejoi/csbdqiu8a7sdnjdks/0/segments/data/\",\"kpayyhxct1I/nxf9yv0/c3ejoi/csbdqiu8a7sdnjdks/1/segments/data/\",\"p2RlgnHeIgc/nxf9yv0/c3ejoi/csbdqiu8a7sdnjdks/2/segments/data/\",\"gkPIurBtB1w/nxf9yv0/c3ejoi/csbdqiu8a7sdnjdks/0/segments/metadata/\",\"Y4YhlbxAB1c/nxf9yv0/c3ejoi/csbdqiu8a7sdnjdks/1/segments/metadata/\",\"HYc8fyVPouI/nxf9yv0/c3ejoi/csbdqiu8a7sdnjdks/2/segments/metadata/\",\"igzyZCz1ysI/nxf9yv0/c3ejoi/csbdqiu8a7sdnjdks/0/segments/lock_files/\",\"uEluEiYmptk/nxf9yv0/c3ejoi/csbdqiu8a7sdnjdks/1/segments/lock_files/\",\"TfAD8f06_7A/nxf9yv0/c3ejoi/csbdqiu8a7sdnjdks/2/segments/lock_files/\",\"QqKEpasbEGs/nxf9yv0/c3ejoi/csbdqiu8a7sdnjdks/0/translog/data/\",\"sNyoimoe1Bw/nxf9yv0/c3ejoi/csbdqiu8a7sdnjdks/1/translog/data/\",\"d4YQtONfq50/nxf9yv0/c3ejoi/csbdqiu8a7sdnjdks/2/translog/data/\",\"zLr4UXjK8T4/nxf9yv0/c3ejoi/csbdqiu8a7sdnjdks/0/translog/metadata/\",\"_s8i7ZmlXGE/nxf9yv0/c3ejoi/csbdqiu8a7sdnjdks/1/translog/metadata/\",\"tvtD3-k5ISg/nxf9yv0/c3ejoi/csbdqiu8a7sdnjdks/2/translog/metadata/\"]}"; + assertEquals(expected, xContentBuilder.toString()); + } + + public void testRemoteIndexPathWithInvalidPathCreationMap() throws IOException { + IllegalArgumentException ex = assertThrows( + IllegalArgumentException.class, + () -> new RemoteIndexPath( + "djjsid73he8yd7usduh", + 2, + new BlobPath().add("djsd878ndjh").add("hcs87cj8"), + PathType.HASHED_PREFIX, + PathHashAlgorithm.FNV_1A_BASE64, + new HashMap<>() + ) + ); + assertEquals( + "Invalid input in RemoteIndexPath constructor indexUUID=djjsid73he8yd7usduh shardCount=2 " + + "basePath=[djsd878ndjh][hcs87cj8] pathType=HASHED_PREFIX pathHashAlgorithm=FNV_1A_BASE64 pathCreationMap={}", + ex.getMessage() + ); + } + + public void testFromXContent() { + UnsupportedOperationException ex = assertThrows( + UnsupportedOperationException.class, + () -> RemoteIndexPath.fromXContent(Mockito.mock(XContentParser.class)) + ); + assertEquals("RemoteIndexPath.fromXContent() is not supported", ex.getMessage()); + } + + public void testInvalidPathCreationMap() { + IllegalArgumentException ex = assertThrows( + IllegalArgumentException.class, + () -> new RemoteIndexPath( + "djjsid73he8yd7usduh", + 2, + new BlobPath().add("djsd878ndjh").add("hcs87cj8"), + PathType.HASHED_PREFIX, + PathHashAlgorithm.FNV_1A_BASE64, + Map.of(TRANSLOG, List.of(LOCK_FILES)) + ) + ); + assertEquals("pathCreationMap={TRANSLOG=[LOCK_FILES]} is having illegal combination of category and type", ex.getMessage()); + } +} diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteIndexPathUploaderTests.java 
b/server/src/test/java/org/opensearch/index/remote/RemoteIndexPathUploaderTests.java new file mode 100644 index 0000000000000..2e4dd15ccb581 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/remote/RemoteIndexPathUploaderTests.java @@ -0,0 +1,314 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.remote; + +import org.opensearch.Version; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.SetOnce; +import org.opensearch.common.UUIDs; +import org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer; +import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.blobstore.BlobStore; +import org.opensearch.common.compress.DeflateCompressor; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; +import org.opensearch.gateway.remote.RemoteClusterStateService; +import org.opensearch.gateway.remote.RemoteClusterStateService.RemoteStateTransferException; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; +import org.opensearch.node.Node; +import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.io.InputStream; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicLong; + +import org.mockito.Mockito; + +import static org.opensearch.index.remote.RemoteStoreEnums.PathType.FIXED; +import static org.opensearch.index.remote.RemoteStoreEnums.PathType.HASHED_INFIX; +import static org.opensearch.index.remote.RemoteStoreEnums.PathType.HASHED_PREFIX; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class RemoteIndexPathUploaderTests extends OpenSearchTestCase { + + private static final String CLUSTER_STATE_REPO_KEY = Node.NODE_ATTRIBUTES.getKey() + + RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; + + private static final String TRANSLOG_REPO_NAME = "translog-repo"; + private static final String SEGMENT_REPO_NAME = "segment-repo"; + + private final ThreadPool threadPool = new TestThreadPool(getTestName()); + private Settings settings; + private ClusterSettings clusterSettings; + private RepositoriesService repositoriesService; + private BlobStoreRepository repository; + private BlobStore blobStore; + private BlobContainer blobContainer; + private BlobPath basePath; + private List indexMetadataList; + private final AtomicLong successCount = new AtomicLong(); + private final AtomicLong 
failureCount = new AtomicLong(); + + @Before + public void setup() { + settings = Settings.builder() + .put(RemoteIndexPathUploader.TRANSLOG_REPO_NAME_KEY, TRANSLOG_REPO_NAME) + .put(RemoteIndexPathUploader.SEGMENT_REPO_NAME_KEY, TRANSLOG_REPO_NAME) + .put(CLUSTER_STATE_REPO_KEY, TRANSLOG_REPO_NAME) + .put(RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true) + .build(); + clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + basePath = BlobPath.cleanPath().add("test"); + repositoriesService = mock(RepositoriesService.class); + repository = mock(BlobStoreRepository.class); + when(repositoriesService.repository(anyString())).thenReturn(repository); + blobStore = mock(BlobStore.class); + when(repository.blobStore()).thenReturn(blobStore); + when(repositoriesService.repository(TRANSLOG_REPO_NAME)).thenReturn(repository); + when(repository.basePath()).thenReturn(basePath); + when(repository.getCompressor()).thenReturn(new DeflateCompressor()); + blobContainer = mock(BlobContainer.class); + when(blobStore.blobContainer(any(BlobPath.class))).thenReturn(blobContainer); + + Map remoteCustomData = Map.of( + PathType.NAME, + HASHED_PREFIX.name(), + RemoteStoreEnums.PathHashAlgorithm.NAME, + RemoteStoreEnums.PathHashAlgorithm.FNV_1A_BASE64.name() + ); + Settings idxSettings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) + .build(); + IndexMetadata indexMetadata = new IndexMetadata.Builder("test").settings(idxSettings) + .numberOfShards(1) + .numberOfReplicas(0) + .putCustom(IndexMetadata.REMOTE_STORE_CUSTOM_KEY, remoteCustomData) + .build(); + indexMetadataList = List.of(indexMetadata); + } + + @After + public void tearDown() throws Exception { + super.tearDown(); + terminate(threadPool); + } + + public void testInterceptWithNoRemoteDataAttributes() { + Settings settings = Settings.Builder.EMPTY_SETTINGS; + clusterSettings.applySettings(settings); + RemoteIndexPathUploader remoteIndexPathUploader = new RemoteIndexPathUploader( + threadPool, + settings, + () -> repositoriesService, + clusterSettings + ); + List indexMetadataList = Mockito.mock(List.class); + ActionListener actionListener = ActionListener.wrap( + res -> successCount.incrementAndGet(), + ex -> failureCount.incrementAndGet() + ); + remoteIndexPathUploader.doOnNewIndexUpload(indexMetadataList, actionListener); + assertEquals(1, successCount.get()); + assertEquals(0, failureCount.get()); + verify(indexMetadataList, times(0)).stream(); + } + + public void testInterceptWithEmptyIndexMetadataList() { + RemoteIndexPathUploader remoteIndexPathUploader = new RemoteIndexPathUploader( + threadPool, + settings, + () -> repositoriesService, + clusterSettings + ); + remoteIndexPathUploader.start(); + ActionListener actionListener = ActionListener.wrap( + res -> successCount.incrementAndGet(), + ex -> failureCount.incrementAndGet() + ); + remoteIndexPathUploader.doOnNewIndexUpload(Collections.emptyList(), actionListener); + assertEquals(1, successCount.get()); + assertEquals(0, failureCount.get()); + } + + public void testInterceptWithEmptyEligibleIndexMetadataList() { + RemoteIndexPathUploader remoteIndexPathUploader = new RemoteIndexPathUploader( + threadPool, + settings, + () -> repositoriesService, + clusterSettings + ); + remoteIndexPathUploader.start(); + ActionListener actionListener = ActionListener.wrap( + res -> successCount.incrementAndGet(), + ex -> 
failureCount.incrementAndGet() + ); + + // Case 1 - Null remoteCustomData + List indexMetadataList = new ArrayList<>(); + IndexMetadata indexMetadata = mock(IndexMetadata.class); + indexMetadataList.add(indexMetadata); + remoteIndexPathUploader.doOnNewIndexUpload(indexMetadataList, actionListener); + assertEquals(1, successCount.get()); + assertEquals(0, failureCount.get()); + + // Case 2 - Empty remoteCustomData + when(indexMetadata.getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY)).thenReturn(new HashMap<>()); + remoteIndexPathUploader.doOnNewIndexUpload(indexMetadataList, actionListener); + assertEquals(2, successCount.get()); + assertEquals(0, failureCount.get()); + + // Case 3 - RemoteStoreEnums.PathType.NAME not in remoteCustomData map + Map remoteCustomData = Map.of("test", "test"); + when(indexMetadata.getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY)).thenReturn(remoteCustomData); + remoteIndexPathUploader.doOnNewIndexUpload(indexMetadataList, actionListener); + assertEquals(3, successCount.get()); + assertEquals(0, failureCount.get()); + + // Case 4 - RemoteStoreEnums.PathType.NAME is not HASHED_PREFIX + remoteCustomData = Map.of(PathType.NAME, randomFrom(FIXED, HASHED_INFIX).name()); + when(indexMetadata.getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY)).thenReturn(remoteCustomData); + remoteIndexPathUploader.doOnNewIndexUpload(indexMetadataList, actionListener); + assertEquals(4, successCount.get()); + assertEquals(0, failureCount.get()); + } + + public void testInterceptWithSameRepo() throws IOException { + RemoteIndexPathUploader remoteIndexPathUploader = new RemoteIndexPathUploader( + threadPool, + settings, + () -> repositoriesService, + clusterSettings + ); + remoteIndexPathUploader.start(); + ActionListener actionListener = ActionListener.wrap( + res -> successCount.incrementAndGet(), + ex -> failureCount.incrementAndGet() + ); + remoteIndexPathUploader.doOnNewIndexUpload(indexMetadataList, actionListener); + assertEquals(1, successCount.get()); + assertEquals(0, failureCount.get()); + verify(blobContainer, times(1)).writeBlob(anyString(), any(InputStream.class), anyLong(), anyBoolean()); + } + + public void testInterceptWithDifferentRepo() throws IOException { + Settings settings = Settings.builder() + .put(this.settings) + .put(RemoteIndexPathUploader.SEGMENT_REPO_NAME_KEY, SEGMENT_REPO_NAME) + .build(); + when(repositoriesService.repository(SEGMENT_REPO_NAME)).thenReturn(repository); + RemoteIndexPathUploader remoteIndexPathUploader = new RemoteIndexPathUploader( + threadPool, + settings, + () -> repositoriesService, + clusterSettings + ); + remoteIndexPathUploader.start(); + ActionListener actionListener = ActionListener.wrap( + res -> successCount.incrementAndGet(), + ex -> failureCount.incrementAndGet() + ); + remoteIndexPathUploader.doOnNewIndexUpload(indexMetadataList, actionListener); + assertEquals(1, successCount.get()); + assertEquals(0, failureCount.get()); + verify(blobContainer, times(2)).writeBlob(anyString(), any(InputStream.class), anyLong(), anyBoolean()); + } + + public void testInterceptWithLatchAwaitTimeout() throws IOException { + blobContainer = mock(AsyncMultiStreamBlobContainer.class); + when(blobStore.blobContainer(any(BlobPath.class))).thenReturn(blobContainer); + RemoteIndexPathUploader remoteIndexPathUploader = new RemoteIndexPathUploader( + threadPool, + settings, + () -> repositoriesService, + clusterSettings + ); + remoteIndexPathUploader.start(); + + Settings settings = Settings.builder() + .put(this.settings) + 
.put(RemoteClusterStateService.INDEX_METADATA_UPLOAD_TIMEOUT_SETTING.getKey(), TimeValue.ZERO) + .build(); + clusterSettings.applySettings(settings); + SetOnce exceptionSetOnce = new SetOnce<>(); + ActionListener actionListener = ActionListener.wrap(res -> successCount.incrementAndGet(), ex -> { + failureCount.incrementAndGet(); + exceptionSetOnce.set(ex); + }); + remoteIndexPathUploader.doOnNewIndexUpload(indexMetadataList, actionListener); + assertEquals(0, successCount.get()); + assertEquals(1, failureCount.get()); + assertTrue(exceptionSetOnce.get() instanceof RemoteStateTransferException); + assertTrue( + exceptionSetOnce.get().getMessage().contains("Timed out waiting while uploading remote index path file for indexes=[test/") + ); + verify(blobContainer, times(0)).writeBlob(anyString(), any(InputStream.class), anyLong(), anyBoolean()); + } + + public void testInterceptWithInterruptedExceptionDuringLatchAwait() throws Exception { + AsyncMultiStreamBlobContainer asyncMultiStreamBlobContainer = mock(AsyncMultiStreamBlobContainer.class); + when(blobStore.blobContainer(any(BlobPath.class))).thenReturn(asyncMultiStreamBlobContainer); + RemoteIndexPathUploader remoteIndexPathUploader = new RemoteIndexPathUploader( + threadPool, + settings, + () -> repositoriesService, + clusterSettings + ); + remoteIndexPathUploader.start(); + Settings settings = Settings.builder() + .put(this.settings) + .put(RemoteClusterStateService.INDEX_METADATA_UPLOAD_TIMEOUT_SETTING.getKey(), TimeValue.timeValueSeconds(1)) + .build(); + clusterSettings.applySettings(settings); + SetOnce exceptionSetOnce = new SetOnce<>(); + ActionListener actionListener = ActionListener.wrap(res -> successCount.incrementAndGet(), ex -> { + failureCount.incrementAndGet(); + exceptionSetOnce.set(ex); + }); + Thread thread = new Thread(() -> { + try { + remoteIndexPathUploader.onNewIndexUpload(indexMetadataList, actionListener); + } catch (Exception e) { + assertTrue(e instanceof InterruptedException); + assertEquals("sleep interrupted", e.getMessage()); + } + }); + thread.start(); + Thread.sleep(10); + thread.interrupt(); + + assertBusy(() -> { + assertEquals(0, successCount.get()); + assertEquals(1, failureCount.get()); + }); + } + +} From 7c6127a1c67021a63b4356fe78e4223f5662f9f2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 23 Apr 2024 13:52:23 -0700 Subject: [PATCH 29/37] Bump jakarta.enterprise:jakarta.enterprise.cdi-api from 4.0.1 to 4.1.0 in /qa/wildfly (#13328) * Bump jakarta.enterprise:jakarta.enterprise.cdi-api in /qa/wildfly Bumps [jakarta.enterprise:jakarta.enterprise.cdi-api](https://github.com/cdi-spec/cdi) from 4.0.1 to 4.1.0. - [Release notes](https://github.com/cdi-spec/cdi/releases) - [Commits](https://github.com/cdi-spec/cdi/compare/4.0.1...4.1.0) --- updated-dependencies: - dependency-name: jakarta.enterprise:jakarta.enterprise.cdi-api dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 1 + qa/wildfly/build.gradle | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 22c3e0d787f58..5fbb705059b07 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -43,6 +43,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.google.apis:google-api-services-compute` from v1-rev235-1.25.0 to v1-rev20240407-2.0.0 ([#13333](https://github.com/opensearch-project/OpenSearch/pull/13333)) - Bump `commons-cli:commons-cli` from 1.6.0 to 1.7.0 ([#13331](https://github.com/opensearch-project/OpenSearch/pull/13331)) - Bump `com.github.spullara.mustache.java:compiler` from 0.9.10 to 0.9.11 ([#13329](https://github.com/opensearch-project/OpenSearch/pull/13329)) +- Bump `jakarta.enterprise:jakarta.enterprise.cdi-api` from 4.0.1 to 4.1.0 ([#13328](https://github.com/opensearch-project/OpenSearch/pull/13328)) ### Changed - [BWC and API enforcement] Enforcing the presence of API annotations at build time ([#12872](https://github.com/opensearch-project/OpenSearch/pull/12872)) diff --git a/qa/wildfly/build.gradle b/qa/wildfly/build.gradle index 5d37be47e782e..abf033fff378a 100644 --- a/qa/wildfly/build.gradle +++ b/qa/wildfly/build.gradle @@ -40,7 +40,7 @@ apply plugin: 'opensearch.internal-distribution-download' testFixtures.useFixture() dependencies { - providedCompile('jakarta.enterprise:jakarta.enterprise.cdi-api:4.0.1') { + providedCompile('jakarta.enterprise:jakarta.enterprise.cdi-api:4.1.0') { exclude module: 'jakarta.annotation-api' } providedCompile 'jakarta.ws.rs:jakarta.ws.rs-api:3.1.0' From 9e62ccf545d253516737b43edb30d79dcb9aed03 Mon Sep 17 00:00:00 2001 From: Naomichi Yamakita Date: Wed, 24 Apr 2024 07:42:12 +0900 Subject: [PATCH 30/37] Fix flat_object parsing error when an object field name is too long (#13259) --------- Signed-off-by: naomichi-y --- CHANGELOG.md | 1 + .../xcontent/JsonToStringXContentParser.java | 9 ++-- .../mapper/FlatObjectFieldDataTests.java | 43 +++++++++++++++++++ 3 files changed, 49 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5fbb705059b07..bb128bf5cfd77 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -69,6 +69,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix snapshot _status API to return correct status for partial snapshots ([#12812](https://github.com/opensearch-project/OpenSearch/pull/12812)) - Improve the error messages for _stats with closed indices ([#13012](https://github.com/opensearch-project/OpenSearch/pull/13012)) - Ignore BaseRestHandler unconsumed content check as it's always consumed. 
([#13290](https://github.com/opensearch-project/OpenSearch/pull/13290)) +- Fix mapper_parsing_exception when using flat_object fields with names longer than 11 characters ([#13259](https://github.com/opensearch-project/OpenSearch/pull/13259)) ### Security diff --git a/server/src/main/java/org/opensearch/common/xcontent/JsonToStringXContentParser.java b/server/src/main/java/org/opensearch/common/xcontent/JsonToStringXContentParser.java index 9b2bd06a88e2e..998122d9e5c43 100644 --- a/server/src/main/java/org/opensearch/common/xcontent/JsonToStringXContentParser.java +++ b/server/src/main/java/org/opensearch/common/xcontent/JsonToStringXContentParser.java @@ -106,8 +106,9 @@ private void parseToken(StringBuilder path, String currentFieldName) throws IOEx // skip } else if (this.parser.currentToken() == Token.START_OBJECT) { parseToken(path, currentFieldName); - int dotIndex = path.lastIndexOf(DOT_SYMBOL); - if (dotIndex != -1) { + int dotIndex = path.lastIndexOf(DOT_SYMBOL, path.length()); + + if (dotIndex != -1 && path.length() > currentFieldName.length()) { path.setLength(path.length() - currentFieldName.length() - 1); } } else { @@ -117,8 +118,8 @@ private void parseToken(StringBuilder path, String currentFieldName) throws IOEx parseValue(parsedFields); this.valueList.add(parsedFields.toString()); this.valueAndPathList.add(path + EQUAL_SYMBOL + parsedFields); - int dotIndex = path.lastIndexOf(DOT_SYMBOL); - if (dotIndex != -1) { + int dotIndex = path.lastIndexOf(DOT_SYMBOL, path.length()); + if (dotIndex != -1 && path.length() > currentFieldName.length()) { path.setLength(path.length() - currentFieldName.length() - 1); } } diff --git a/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldDataTests.java b/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldDataTests.java index e318ca5e953a3..03ce1eb7252de 100644 --- a/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldDataTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldDataTests.java @@ -54,6 +54,49 @@ public void testDocValue() throws Exception { assertEquals(1, valueReaders.size()); } + public void testLongFieldNameWithHashArray() throws Exception { + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("test") + .startObject("properties") + .startObject("field") + .field("type", FIELD_TYPE) + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); + final DocumentMapper mapper = mapperService.documentMapperParser().parse("test", new CompressedXContent(mapping)); + + XContentBuilder json = XContentFactory.jsonBuilder() + .startObject() + .startObject("field") + .startObject("detail") + .startArray("fooooooooooo") + .startObject() + .field("name", "baz") + .endObject() + .startObject() + .field("name", "baz") + .endObject() + .endArray() + .endObject() + .endObject() + .endObject(); + + ParsedDocument d = mapper.parse(new SourceToParse("test", "1", BytesReference.bytes(json), MediaTypeRegistry.JSON)); + writer.addDocument(d.rootDoc()); + writer.commit(); + + IndexFieldData fieldData = getForField("field"); + List readers = refreshReader(); + assertEquals(1, readers.size()); + + IndexFieldData valueFieldData = getForField("field._value"); + List valueReaders = refreshReader(); + assertEquals(1, valueReaders.size()); + } + @Override protected String getFieldDataType() { return FIELD_TYPE; From 76e4c86af61de81b5d54d0440e7210913a8c2d8f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" 
<49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 24 Apr 2024 10:25:36 -0700 Subject: [PATCH 31/37] Bump com.google.api.grpc:proto-google-iam-v1 from 0.12.0 to 1.33.0 in /plugins/repository-gcs (#13332) * Bump com.google.api.grpc:proto-google-iam-v1 in /plugins/repository-gcs Bumps [com.google.api.grpc:proto-google-iam-v1](https://github.com/googleapis/sdk-platform-java) from 0.12.0 to 1.33.0. - [Release notes](https://github.com/googleapis/sdk-platform-java/releases) - [Changelog](https://github.com/googleapis/sdk-platform-java/blob/main/CHANGELOG.md) - [Commits](https://github.com/googleapis/sdk-platform-java/commits) --- updated-dependencies: - dependency-name: com.google.api.grpc:proto-google-iam-v1 dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 1 + plugins/repository-gcs/build.gradle | 2 +- .../repository-gcs/licenses/proto-google-iam-v1-0.12.0.jar.sha1 | 1 - .../repository-gcs/licenses/proto-google-iam-v1-1.33.0.jar.sha1 | 1 + 4 files changed, 3 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-gcs/licenses/proto-google-iam-v1-0.12.0.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/proto-google-iam-v1-1.33.0.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index bb128bf5cfd77..db73c0e8caf21 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -44,6 +44,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `commons-cli:commons-cli` from 1.6.0 to 1.7.0 ([#13331](https://github.com/opensearch-project/OpenSearch/pull/13331)) - Bump `com.github.spullara.mustache.java:compiler` from 0.9.10 to 0.9.11 ([#13329](https://github.com/opensearch-project/OpenSearch/pull/13329)) - Bump `jakarta.enterprise:jakarta.enterprise.cdi-api` from 4.0.1 to 4.1.0 ([#13328](https://github.com/opensearch-project/OpenSearch/pull/13328)) +- Bump `com.google.api.grpc:proto-google-iam-v1` from 0.12.0 to 1.33.0 ([#13332](https://github.com/opensearch-project/OpenSearch/pull/13332)) ### Changed - [BWC and API enforcement] Enforcing the presence of API annotations at build time ([#12872](https://github.com/opensearch-project/OpenSearch/pull/12872)) diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index c4b1ab8d6875e..110df89f25de8 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -61,7 +61,7 @@ dependencies { api 'com.google.api-client:google-api-client:2.2.0' api 'com.google.api.grpc:proto-google-common-protos:2.37.1' - api 'com.google.api.grpc:proto-google-iam-v1:0.12.0' + api 'com.google.api.grpc:proto-google-iam-v1:1.33.0' api "com.google.auth:google-auth-library-credentials:${versions.google_auth}" api "com.google.auth:google-auth-library-oauth2-http:${versions.google_auth}" diff --git a/plugins/repository-gcs/licenses/proto-google-iam-v1-0.12.0.jar.sha1 b/plugins/repository-gcs/licenses/proto-google-iam-v1-0.12.0.jar.sha1 deleted file mode 100644 index 2bfae3456d499..0000000000000 --- a/plugins/repository-gcs/licenses/proto-google-iam-v1-0.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ea312c0250a5d0a7cdd1b20bc2c3259938b79855 \ No newline at end of file diff --git 
a/plugins/repository-gcs/licenses/proto-google-iam-v1-1.33.0.jar.sha1 b/plugins/repository-gcs/licenses/proto-google-iam-v1-1.33.0.jar.sha1 new file mode 100644 index 0000000000000..ba04056c54697 --- /dev/null +++ b/plugins/repository-gcs/licenses/proto-google-iam-v1-1.33.0.jar.sha1 @@ -0,0 +1 @@ +4766da92d1f36c8b612c1c142d5f3ace3774f098 \ No newline at end of file From db361ecead1d3c945371c84f4e0ea915c7abcc57 Mon Sep 17 00:00:00 2001 From: Kiran Prakash Date: Wed, 24 Apr 2024 14:50:39 -0700 Subject: [PATCH 32/37] [Tiered Caching] Bug fix for IndicesRequestCache StaleKey management (#13070) * Update IndicesRequestCache.java Signed-off-by: Kiran Prakash * Update IndicesRequestCacheTests.java Signed-off-by: Kiran Prakash * Update CHANGELOG.md Signed-off-by: Kiran Prakash * revert Signed-off-by: Kiran Prakash * revert Signed-off-by: Kiran Prakash * Update IndicesRequestCache.java Signed-off-by: Kiran Prakash * Update IndicesRequestCacheTests.java Signed-off-by: Kiran Prakash * Update IndicesRequestCache.java Signed-off-by: Kiran Prakash * Update IndicesRequestCacheTests.java Signed-off-by: Kiran Prakash * Update IndicesRequestCache.java Signed-off-by: Kiran Prakash * Update IndicesRequestCacheTests.java Signed-off-by: Kiran Prakash * Update IndicesRequestCacheTests.java Signed-off-by: Kiran Prakash * Update IndicesRequestCacheTests.java Signed-off-by: Kiran Prakash * Update IndicesRequestCacheTests.java Signed-off-by: Kiran Prakash * Update IndicesRequestCache.java Signed-off-by: Kiran Prakash * Update IndicesRequestCacheTests.java Signed-off-by: Kiran Prakash * Update IndicesRequestCacheTests.java Signed-off-by: Kiran Prakash * Update IndicesRequestCache.java Signed-off-by: Kiran Prakash * Update IndicesRequestCacheTests.java Signed-off-by: Kiran Prakash * Update IndicesRequestCache.java Signed-off-by: Kiran Prakash * Update IndicesRequestCacheTests.java Signed-off-by: Kiran Prakash * code comments only Signed-off-by: Kiran Prakash * docs changes Signed-off-by: Kiran Prakash * Update CHANGELOG.md Signed-off-by: Kiran Prakash * revert catching AlreadyClosedException Signed-off-by: Kiran Prakash * assert Signed-off-by: Kiran Prakash * conflicts Signed-off-by: Kiran Prakash * Update IndicesRequestCacheTests.java Signed-off-by: Kiran Prakash * Update IndicesRequestCache.java Signed-off-by: Kiran Prakash * address comments Signed-off-by: Kiran Prakash * Update IndicesRequestCache.java Signed-off-by: Kiran Prakash * Update IndicesRequestCache.java Signed-off-by: Kiran Prakash * address conflicts Signed-off-by: Kiran Prakash * spotless apply Signed-off-by: Kiran Prakash * address comments Signed-off-by: Kiran Prakash * update code comments Signed-off-by: Kiran Prakash * address bug & add tests Signed-off-by: Kiran Prakash --------- Signed-off-by: Kiran Prakash --- CHANGELOG.md | 1 + .../indices/IndicesRequestCache.java | 92 +- .../indices/IndicesRequestCacheTests.java | 908 ++++++++---------- 3 files changed, 486 insertions(+), 515 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index db73c0e8caf21..b80741f24e275 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -67,6 +67,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix from and size parameter can be negative when searching ([#13047](https://github.com/opensearch-project/OpenSearch/pull/13047)) - Enabled mockTelemetryPlugin for IT and fixed OOM issues ([#13054](https://github.com/opensearch-project/OpenSearch/pull/13054)) - Fix implement mark() and markSupported() in class FilterStreamInput 
([#13098](https://github.com/opensearch-project/OpenSearch/pull/13098)) +- Fix IndicesRequestCache Stale calculation ([#13070](https://github.com/opensearch-project/OpenSearch/pull/13070)) - Fix snapshot _status API to return correct status for partial snapshots ([#12812](https://github.com/opensearch-project/OpenSearch/pull/12812)) - Improve the error messages for _stats with closed indices ([#13012](https://github.com/opensearch-project/OpenSearch/pull/13012)) - Ignore BaseRestHandler unconsumed content check as it's always consumed. ([#13290](https://github.com/opensearch-project/OpenSearch/pull/13290)) diff --git a/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java b/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java index eab772cda3213..039e14a031f3f 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java +++ b/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java @@ -47,6 +47,7 @@ import org.opensearch.common.cache.LoadAwareCacheLoader; import org.opensearch.common.cache.RemovalListener; import org.opensearch.common.cache.RemovalNotification; +import org.opensearch.common.cache.RemovalReason; import org.opensearch.common.cache.policy.CachedQueryResult; import org.opensearch.common.cache.serializer.BytesReferenceSerializer; import org.opensearch.common.cache.service.CacheService; @@ -216,7 +217,6 @@ void clear(CacheEntity entity) { public void onRemoval(RemovalNotification<ICacheKey<Key>, BytesReference> notification) { // In case this event happens for an old shard, we can safely ignore this as we don't keep track for old // shards as part of request cache. - // Pass a new removal notification containing Key rather than ICacheKey to the CacheEntity for backwards compatibility. Key key = notification.getKey().key; RemovalNotification<Key, BytesReference> newNotification = new RemovalNotification<>( @@ -224,11 +224,9 @@ public void onRemoval(RemovalNotification<ICacheKey<Key>, BytesReference> notifi notification.getValue(), notification.getRemovalReason() ); - cacheEntityLookup.apply(key.shardId).ifPresent(entity -> entity.onRemoval(newNotification)); - cacheCleanupManager.updateCleanupKeyToCountMapOnCacheEviction( - new CleanupKey(cacheEntityLookup.apply(key.shardId).orElse(null), key.readerCacheKeyId) - ); + CleanupKey cleanupKey = new CleanupKey(cacheEntityLookup.apply(key.shardId).orElse(null), key.readerCacheKeyId); + cacheCleanupManager.updateStaleCountOnEntryRemoval(cleanupKey, newNotification); } private ICacheKey<Key> getICacheKey(Key key) { @@ -272,10 +270,11 @@ BytesReference getOrCompute( OpenSearchDirectoryReader.addReaderCloseListener(reader, cleanupKey); } } - cacheCleanupManager.updateCleanupKeyToCountMapOnCacheInsertion(cleanupKey); + cacheCleanupManager.updateStaleCountOnCacheInsert(cleanupKey); } else { cacheEntity.onHit(); } + return value; } @@ -508,7 +507,7 @@ void enqueueCleanupKey(CleanupKey cleanupKey) { * * @param cleanupKey the CleanupKey to be updated in the map */ - private void updateCleanupKeyToCountMapOnCacheInsertion(CleanupKey cleanupKey) { + private void updateStaleCountOnCacheInsert(CleanupKey cleanupKey) { if (stalenessThreshold == 0.0 || cleanupKey.entity == null) { return; } @@ -524,8 +523,29 @@ private void updateCleanupKeyToCountMapOnCacheInsertion(CleanupKey cleanupKey) { cleanupKeyToCountMap.computeIfAbsent(shardId, k -> new HashMap<>()).merge(cleanupKey.readerCacheKeyId, 1, Integer::sum); } - private void updateCleanupKeyToCountMapOnCacheEviction(CleanupKey cleanupKey) { - if (stalenessThreshold == 0.0 ||
cleanupKey.entity == null) { + /** + * Handles the eviction of a cache entry. + * + *
<p>
This method is called when an entry is evicted from the cache. + * We consider all removal notifications except those with the reason REPLACED. + * {@link #incrementStaleKeysCount} would have removed the entries from the map and incremented the {@link #staleKeysCount}. + * Hence we decrement {@link #staleKeysCount} if we do not find the shardId or readerCacheKeyId in the map. + * Skip decrementing staleKeysCount if we find the shardId or readerCacheKeyId in the map, since it would not have been accounted for in the staleKeysCount. + * + * @param cleanupKey the CleanupKey that has been evicted from the cache + * @param notification RemovalNotification of the cache entry evicted + */ + private void updateStaleCountOnEntryRemoval(CleanupKey cleanupKey, RemovalNotification<Key, BytesReference> notification) { + if (notification.getRemovalReason() == RemovalReason.REPLACED) { + // The reason of the notification is REPLACED when a cache entry's value is updated, since replacing an entry + // does not affect the staleness count, we skip such notifications. + return; + } + if (cleanupKey.entity == null) { + // entity will only be null when the shard is closed/deleted + // we would have accounted this in staleKeysCount when the closing/deletion of shard would have closed the associated + // readers + staleKeysCount.decrementAndGet(); return; } IndexShard indexShard = (IndexShard) cleanupKey.entity.getCacheIdentity(); @@ -535,15 +555,33 @@ private void updateCleanupKeyToCountMapOnCacheEviction(CleanupKey cleanupKey) { } ShardId shardId = indexShard.shardId(); - cleanupKeyToCountMap.computeIfPresent(shardId, (shard, keyCountMap) -> { - keyCountMap.computeIfPresent(cleanupKey.readerCacheKeyId, (key, currentValue) -> { - // decrement the stale key count + cleanupKeyToCountMap.compute(shardId, (key, readerCacheKeyMap) -> { + if (readerCacheKeyMap == null || !readerCacheKeyMap.containsKey(cleanupKey.readerCacheKeyId)) { + // If ShardId is not present or readerCacheKeyId is not present + // it should have already been accounted for and hence been removed from this map + // so decrement staleKeysCount staleKeysCount.decrementAndGet(); - int newValue = currentValue - 1; - // Remove the key if the new value is zero by returning null; otherwise, update with the new value. - return newValue == 0 ? null : newValue; - }); - return keyCountMap; + // Return the current map + return readerCacheKeyMap; + } else { + // If it is in the map, it is not stale yet. + // Proceed to adjust the count for the readerCacheKeyId in the map + // but do not decrement the staleKeysCount + Integer count = readerCacheKeyMap.get(cleanupKey.readerCacheKeyId); + // this should never be null + assert (count != null && count >= 0); + // Reduce the count by 1 + int newCount = count - 1; + if (newCount > 0) { + // Update the map with the new count + readerCacheKeyMap.put(cleanupKey.readerCacheKeyId, newCount); + } else { + // Remove the readerCacheKeyId entry if new count is zero + readerCacheKeyMap.remove(cleanupKey.readerCacheKeyId); + } + // If after modification, the readerCacheKeyMap is empty, we return null to remove the ShardId entry + return readerCacheKeyMap.isEmpty() ? null : readerCacheKeyMap; + } }); } @@ -551,7 +589,7 @@ private void updateCleanupKeyToCountMapOnCacheEviction(CleanupKey cleanupKey) { * Updates the count of stale keys in the cache. * This method is called when a CleanupKey is added to the keysToClean set. * - * It increments the staleKeysCount by the count of the CleanupKey in the cleanupKeyToCountMap. + *
<p>
It increments the staleKeysCount by the count of the CleanupKey in the cleanupKeyToCountMap. * If the CleanupKey's readerCacheKeyId is null or the CleanupKey's entity is not open, it increments the staleKeysCount * by the total count of keys associated with the CleanupKey's ShardId in the cleanupKeyToCountMap and removes the ShardId from the map. * @@ -569,7 +607,7 @@ private void incrementStaleKeysCount(CleanupKey cleanupKey) { ShardId shardId = indexShard.shardId(); // Using computeIfPresent to atomically operate on the countMap for a given shardId - cleanupKeyToCountMap.computeIfPresent(shardId, (key, countMap) -> { + cleanupKeyToCountMap.computeIfPresent(shardId, (currentShardId, countMap) -> { if (cleanupKey.readerCacheKeyId == null) { // Aggregate and add to staleKeysCount atomically if readerCacheKeyId is null int totalSum = countMap.values().stream().mapToInt(Integer::intValue).sum(); @@ -578,18 +616,19 @@ private void incrementStaleKeysCount(CleanupKey cleanupKey) { return null; } else { // Update staleKeysCount based on specific readerCacheKeyId, then remove it from the countMap - countMap.computeIfPresent(cleanupKey.readerCacheKeyId, (k, v) -> { - staleKeysCount.addAndGet(v); + countMap.computeIfPresent(cleanupKey.readerCacheKeyId, (readerCacheKey, count) -> { + staleKeysCount.addAndGet(count); // Return null to remove the key after updating staleKeysCount return null; }); - // Check if countMap is empty after removal to decide if we need to remove the shardId entry if (countMap.isEmpty()) { - return null; // Returning null removes the entry for shardId + // Returning null removes the entry for shardId + return null; } } - return countMap; // Return the modified countMap to keep the mapping + // Return the modified countMap to retain updates + return countMap; }); } @@ -715,6 +754,11 @@ public void close() { this.cacheCleaner.close(); } + // for testing + ConcurrentMap> getCleanupKeyToCountMap() { + return cleanupKeyToCountMap; + } + private final class IndicesRequestCacheCleaner implements Runnable, Releasable { private final IndicesRequestCacheCleanupManager cacheCleanupManager; diff --git a/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java b/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java index e3dca1b7bfda2..051acfe9d085a 100644 --- a/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java +++ b/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java @@ -38,7 +38,6 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.TermQuery; @@ -51,7 +50,6 @@ import org.opensearch.common.cache.RemovalNotification; import org.opensearch.common.cache.RemovalReason; import org.opensearch.common.cache.module.CacheModule; -import org.opensearch.common.cache.service.CacheService; import org.opensearch.common.cache.stats.ImmutableCacheStats; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.lucene.index.OpenSearchDirectoryReader; @@ -77,46 +75,58 @@ import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchSingleNodeTestCase; import org.opensearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; 
+import java.util.HashMap; import java.util.List; import java.util.Optional; import java.util.UUID; +import java.util.concurrent.ConcurrentMap; import java.util.concurrent.atomic.AtomicInteger; import static org.opensearch.indices.IndicesRequestCache.INDICES_REQUEST_CACHE_STALENESS_THRESHOLD_SETTING; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; public class IndicesRequestCacheTests extends OpenSearchSingleNodeTestCase { + private ThreadPool threadPool; + private IndexWriter writer; + private Directory dir; + private IndicesRequestCache cache; + private IndexShard indexShard; + private ThreadPool getThreadPool() { return new ThreadPool(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "default tracer tests").build()); } - public void testBasicOperationsCache() throws Exception { - IndexShard indexShard = createIndex("test").getShard(0); - ThreadPool threadPool = getThreadPool(); - IndicesRequestCache cache = new IndicesRequestCache( - Settings.EMPTY, - (shardId -> Optional.of(new IndicesService.IndexShardCacheEntity(indexShard))), - new CacheModule(new ArrayList<>(), Settings.EMPTY).getCacheService(), - threadPool, - ClusterServiceUtils.createClusterService(threadPool) - ); - Directory dir = newDirectory(); - IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); + @Before + public void setup() throws IOException { + dir = newDirectory(); + writer = new IndexWriter(dir, newIndexWriterConfig()); + indexShard = createIndex("test").getShard(0); + } + @After + public void cleanup() throws IOException { + IOUtils.close(writer, dir, cache); + terminate(threadPool); + } + + public void testBasicOperationsCache() throws Exception { + threadPool = getThreadPool(); + cache = getIndicesRequestCache(Settings.EMPTY); writer.addDocument(newDoc(0, "foo")); - DirectoryReader reader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); - TermQueryBuilder termQuery = new TermQueryBuilder("id", "0"); - BytesReference termBytes = XContentHelper.toXContent(termQuery, MediaTypeRegistry.JSON, false); + DirectoryReader reader = getReader(writer, indexShard.shardId()); // initial cache IndicesService.IndexShardCacheEntity entity = new IndicesService.IndexShardCacheEntity(indexShard); Loader loader = new Loader(reader, 0); - BytesReference value = cache.getOrCompute(entity, loader, reader, termBytes); + BytesReference value = cache.getOrCompute(entity, loader, reader, getTermBytes()); assertEquals("foo", value.streamInput().readString()); ShardRequestCache requestCacheStats = indexShard.requestCache(); assertEquals(0, requestCacheStats.stats().getHitCount()); @@ -128,7 +138,7 @@ public void testBasicOperationsCache() throws Exception { // cache hit entity = new IndicesService.IndexShardCacheEntity(indexShard); loader = new Loader(reader, 0); - value = cache.getOrCompute(entity, loader, reader, termBytes); + value = cache.getOrCompute(entity, loader, reader, getTermBytes()); assertEquals("foo", value.streamInput().readString()); requestCacheStats = indexShard.requestCache(); assertEquals(1, requestCacheStats.stats().getHitCount()); @@ -154,34 +164,21 @@ public void testBasicOperationsCache() throws Exception { assertEquals(0, cache.count()); assertEquals(0, requestCacheStats.stats().getMemorySize().bytesAsInt()); - IOUtils.close(reader, writer, dir, cache); - terminate(threadPool); + IOUtils.close(reader); assertEquals(0, 
cache.numRegisteredCloseListeners()); } public void testBasicOperationsCacheWithFeatureFlag() throws Exception { - IndexShard indexShard = createIndex("test").getShard(0); - CacheService cacheService = new CacheModule(new ArrayList<>(), Settings.EMPTY).getCacheService(); - ThreadPool threadPool = getThreadPool(); - IndicesRequestCache cache = new IndicesRequestCache( - Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.PLUGGABLE_CACHE, "true").build(), - (shardId -> Optional.of(new IndicesService.IndexShardCacheEntity(indexShard))), - cacheService, - threadPool, - ClusterServiceUtils.createClusterService(threadPool) - ); - Directory dir = newDirectory(); - IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); - + threadPool = getThreadPool(); + Settings settings = Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.PLUGGABLE_CACHE, "true").build(); + cache = getIndicesRequestCache(settings); writer.addDocument(newDoc(0, "foo")); - DirectoryReader reader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); - TermQueryBuilder termQuery = new TermQueryBuilder("id", "0"); - BytesReference termBytes = XContentHelper.toXContent(termQuery, MediaTypeRegistry.JSON, false); + DirectoryReader reader = getReader(writer, indexShard.shardId()); // initial cache IndicesService.IndexShardCacheEntity entity = new IndicesService.IndexShardCacheEntity(indexShard); Loader loader = new Loader(reader, 0); - BytesReference value = cache.getOrCompute(entity, loader, reader, termBytes); + BytesReference value = cache.getOrCompute(entity, loader, reader, getTermBytes()); assertEquals("foo", value.streamInput().readString()); ShardRequestCache requestCacheStats = indexShard.requestCache(); assertEquals(0, requestCacheStats.stats().getHitCount()); @@ -193,7 +190,7 @@ public void testBasicOperationsCacheWithFeatureFlag() throws Exception { // cache hit entity = new IndicesService.IndexShardCacheEntity(indexShard); loader = new Loader(reader, 0); - value = cache.getOrCompute(entity, loader, reader, termBytes); + value = cache.getOrCompute(entity, loader, reader, getTermBytes()); assertEquals("foo", value.streamInput().readString()); requestCacheStats = indexShard.requestCache(); assertEquals(1, requestCacheStats.stats().getHitCount()); @@ -219,47 +216,28 @@ public void testBasicOperationsCacheWithFeatureFlag() throws Exception { assertEquals(0, cache.count()); assertEquals(0, requestCacheStats.stats().getMemorySize().bytesAsInt()); - IOUtils.close(reader, writer, dir, cache); - terminate(threadPool); + IOUtils.close(reader); assertEquals(0, cache.numRegisteredCloseListeners()); } public void testCacheDifferentReaders() throws Exception { - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexShard indexShard = createIndex("test").getShard(0); - ThreadPool threadPool = getThreadPool(); - IndicesRequestCache cache = new IndicesRequestCache(Settings.EMPTY, (shardId -> { - IndexService indexService = null; - try { - indexService = indicesService.indexServiceSafe(shardId.getIndex()); - } catch (IndexNotFoundException ex) { - return Optional.empty(); - } - return Optional.of(new IndicesService.IndexShardCacheEntity(indexService.getShard(shardId.id()))); - }), - new CacheModule(new ArrayList<>(), Settings.EMPTY).getCacheService(), - threadPool, - ClusterServiceUtils.createClusterService(threadPool) - ); - Directory dir = newDirectory(); - IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); - + 
threadPool = getThreadPool(); + cache = getIndicesRequestCache(Settings.EMPTY); writer.addDocument(newDoc(0, "foo")); - DirectoryReader reader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); - TermQueryBuilder termQuery = new TermQueryBuilder("id", "0"); - BytesReference termBytes = XContentHelper.toXContent(termQuery, MediaTypeRegistry.JSON, false); + DirectoryReader reader = getReader(writer, indexShard.shardId()); + if (randomBoolean()) { writer.flush(); IOUtils.close(writer); writer = new IndexWriter(dir, newIndexWriterConfig()); } writer.updateDocument(new Term("id", "0"), newDoc(0, "bar")); - DirectoryReader secondReader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); + DirectoryReader secondReader = getReader(writer, indexShard.shardId()); // initial cache IndicesService.IndexShardCacheEntity entity = new IndicesService.IndexShardCacheEntity(indexShard); Loader loader = new Loader(reader, 0); - BytesReference value = cache.getOrCompute(entity, loader, reader, termBytes); + BytesReference value = cache.getOrCompute(entity, loader, reader, getTermBytes()); ShardRequestCache requestCacheStats = entity.stats(); assertEquals("foo", value.streamInput().readString()); assertEquals(0, requestCacheStats.stats().getHitCount()); @@ -274,7 +252,7 @@ public void testCacheDifferentReaders() throws Exception { // cache the second IndicesService.IndexShardCacheEntity secondEntity = new IndicesService.IndexShardCacheEntity(indexShard); loader = new Loader(secondReader, 0); - value = cache.getOrCompute(entity, loader, secondReader, termBytes); + value = cache.getOrCompute(entity, loader, secondReader, getTermBytes()); requestCacheStats = entity.stats(); assertEquals("bar", value.streamInput().readString()); assertEquals(0, requestCacheStats.stats().getHitCount()); @@ -287,7 +265,7 @@ public void testCacheDifferentReaders() throws Exception { secondEntity = new IndicesService.IndexShardCacheEntity(indexShard); loader = new Loader(secondReader, 0); - value = cache.getOrCompute(secondEntity, loader, secondReader, termBytes); + value = cache.getOrCompute(secondEntity, loader, secondReader, getTermBytes()); requestCacheStats = entity.stats(); assertEquals("bar", value.streamInput().readString()); assertEquals(1, requestCacheStats.stats().getHitCount()); @@ -298,7 +276,7 @@ public void testCacheDifferentReaders() throws Exception { entity = new IndicesService.IndexShardCacheEntity(indexShard); loader = new Loader(reader, 0); - value = cache.getOrCompute(entity, loader, reader, termBytes); + value = cache.getOrCompute(entity, loader, reader, getTermBytes()); assertEquals("foo", value.streamInput().readString()); requestCacheStats = entity.stats(); assertEquals(2, requestCacheStats.stats().getHitCount()); @@ -331,8 +309,7 @@ public void testCacheDifferentReaders() throws Exception { assertEquals(0, cache.count()); assertEquals(0, requestCacheStats.stats().getMemorySize().bytesAsInt()); - IOUtils.close(secondReader, writer, dir, cache); - terminate(threadPool); + IOUtils.close(secondReader); assertEquals(0, cache.numRegisteredCloseListeners()); } @@ -359,55 +336,20 @@ public void testCacheCleanupThresholdSettingValidator_Invalid_Percentage() { assertThrows(IllegalArgumentException.class, () -> { IndicesRequestCache.validateStalenessSetting("500%"); }); } + // when staleness threshold is zero, stale keys should be cleaned up every time cache cleaner is invoked. 
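// At a glance, the decision these cleanup tests exercise is a simple ratio check; the lines
// below are a minimal sketch under assumed names (the real fields and the exact guard live in
// IndicesRequestCacheCleanupManager, shown in the diff above), not the actual implementation:
//
//     double staleness = cacheCount == 0 ? 0.0 : (double) staleKeysCount.get() / cacheCount;
//     if (staleness >= stalenessThreshold) {
//         cleanStaleKeys(); // a 0% threshold therefore cleans on every cycle
//     }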
public void testCacheCleanupBasedOnZeroThreshold() throws Exception { - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexShard indexShard = createIndex("test").getShard(0); - ThreadPool threadPool = getThreadPool(); + threadPool = getThreadPool(); Settings settings = Settings.builder().put(INDICES_REQUEST_CACHE_STALENESS_THRESHOLD_SETTING.getKey(), "0%").build(); - IndicesRequestCache cache = new IndicesRequestCache(settings, (shardId -> { - IndexService indexService = null; - try { - indexService = indicesService.indexServiceSafe(shardId.getIndex()); - } catch (IndexNotFoundException ex) { - return Optional.empty(); - } - return Optional.of(new IndicesService.IndexShardCacheEntity(indexService.getShard(shardId.id()))); - }), - new CacheModule(new ArrayList<>(), settings).getCacheService(), - threadPool, - ClusterServiceUtils.createClusterService(threadPool) - ); - Directory dir = newDirectory(); - IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); - + cache = getIndicesRequestCache(settings); writer.addDocument(newDoc(0, "foo")); - DirectoryReader reader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); - TermQueryBuilder termQuery = new TermQueryBuilder("id", "0"); - BytesReference termBytes = XContentHelper.toXContent(termQuery, MediaTypeRegistry.JSON, false); - if (randomBoolean()) { - writer.flush(); - IOUtils.close(writer); - writer = new IndexWriter(dir, newIndexWriterConfig()); - } - writer.updateDocument(new Term("id", "0"), newDoc(0, "bar")); - DirectoryReader secondReader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); + DirectoryReader reader = getReader(writer, indexShard.shardId()); + DirectoryReader secondReader = getReader(writer, indexShard.shardId()); // Get 2 entries into the cache - IndicesService.IndexShardCacheEntity entity = new IndicesService.IndexShardCacheEntity(indexShard); - Loader loader = new Loader(reader, 0); - cache.getOrCompute(entity, loader, reader, termBytes); - - entity = new IndicesService.IndexShardCacheEntity(indexShard); - loader = new Loader(reader, 0); - cache.getOrCompute(entity, loader, reader, termBytes); - - IndicesService.IndexShardCacheEntity secondEntity = new IndicesService.IndexShardCacheEntity(indexShard); - loader = new Loader(secondReader, 0); - cache.getOrCompute(entity, loader, secondReader, termBytes); + cache.getOrCompute(getEntity(indexShard), getLoader(reader), reader, getTermBytes()); + assertEquals(1, cache.count()); - secondEntity = new IndicesService.IndexShardCacheEntity(indexShard); - loader = new Loader(secondReader, 0); - cache.getOrCompute(secondEntity, loader, secondReader, termBytes); + cache.getOrCompute(getEntity(indexShard), getLoader(secondReader), secondReader, getTermBytes()); assertEquals(2, cache.count()); // Close the reader, to be enqueued for cleanup @@ -419,209 +361,148 @@ public void testCacheCleanupBasedOnZeroThreshold() throws Exception { cache.cacheCleanupManager.cleanCache(); // cleanup should remove the stale-key assertEquals(1, cache.count()); - - IOUtils.close(secondReader, writer, dir, cache); - terminate(threadPool); + IOUtils.close(secondReader); } - public void testCacheCleanupBasedOnStaleThreshold_StalenessEqualToThreshold() throws Exception { - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexShard indexShard = createIndex("test").getShard(0); - ThreadPool threadPool = getThreadPool(); - Settings settings = 
Settings.builder().put(INDICES_REQUEST_CACHE_STALENESS_THRESHOLD_SETTING.getKey(), "0.5").build(); - IndicesRequestCache cache = new IndicesRequestCache(settings, (shardId -> { - IndexService indexService = null; - try { - indexService = indicesService.indexServiceSafe(shardId.getIndex()); - } catch (IndexNotFoundException ex) { - return Optional.empty(); - } - return Optional.of(new IndicesService.IndexShardCacheEntity(indexService.getShard(shardId.id()))); - }), - new CacheModule(new ArrayList<>(), settings).getCacheService(), - threadPool, - ClusterServiceUtils.createClusterService(threadPool) - ); - Directory dir = newDirectory(); - IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); + // when staleness count is higher than stale threshold, stale keys should be cleaned up. + public void testCacheCleanupBasedOnStaleThreshold_StalenessHigherThanThreshold() throws Exception { + threadPool = getThreadPool(); + Settings settings = Settings.builder().put(INDICES_REQUEST_CACHE_STALENESS_THRESHOLD_SETTING.getKey(), "0.49").build(); + cache = getIndicesRequestCache(settings); writer.addDocument(newDoc(0, "foo")); - DirectoryReader reader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); - TermQueryBuilder termQuery = new TermQueryBuilder("id", "0"); - BytesReference termBytes = XContentHelper.toXContent(termQuery, MediaTypeRegistry.JSON, false); - if (randomBoolean()) { - writer.flush(); - IOUtils.close(writer); - writer = new IndexWriter(dir, newIndexWriterConfig()); - } - writer.updateDocument(new Term("id", "0"), newDoc(0, "bar")); - DirectoryReader secondReader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); + DirectoryReader reader = getReader(writer, indexShard.shardId()); + DirectoryReader secondReader = getReader(writer, indexShard.shardId()); // Get 2 entries into the cache - IndicesService.IndexShardCacheEntity entity = new IndicesService.IndexShardCacheEntity(indexShard); - Loader loader = new Loader(reader, 0); - cache.getOrCompute(entity, loader, reader, termBytes); + cache.getOrCompute(getEntity(indexShard), getLoader(reader), reader, getTermBytes()); + assertEquals(1, cache.count()); - entity = new IndicesService.IndexShardCacheEntity(indexShard); - loader = new Loader(reader, 0); - cache.getOrCompute(entity, loader, reader, termBytes); + cache.getOrCompute(getEntity(indexShard), getLoader(secondReader), secondReader, getTermBytes()); + assertEquals(2, cache.count()); - IndicesService.IndexShardCacheEntity secondEntity = new IndicesService.IndexShardCacheEntity(indexShard); - loader = new Loader(secondReader, 0); - cache.getOrCompute(entity, loader, secondReader, termBytes); + // no stale keys so far + assertEquals(0, cache.cacheCleanupManager.getStaleKeysCount().get()); + // Close the reader, to be enqueued for cleanup + reader.close(); + // 1 out of 2 keys ie 50% are now stale. 
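// With the 0.49 threshold configured above, staleness works out to 1 stale key out of
// 2 cached keys = 0.50, which exceeds the threshold, so the cleanCache() call below is
// expected to actually remove the stale entry.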
+ assertEquals(1, cache.cacheCleanupManager.getStaleKeysCount().get()); + // cache count should not be affected + assertEquals(2, cache.count()); - secondEntity = new IndicesService.IndexShardCacheEntity(indexShard); - loader = new Loader(secondReader, 0); - cache.getOrCompute(secondEntity, loader, secondReader, termBytes); + // clean cache with 49% staleness threshold + cache.cacheCleanupManager.cleanCache(); + // cleanup should have taken effect with 49% threshold + assertEquals(1, cache.count()); + assertEquals(0, cache.cacheCleanupManager.getStaleKeysCount().get()); + + IOUtils.close(secondReader); + } + + // when staleness count equal to stale threshold, stale keys should be cleaned up. + public void testCacheCleanupBasedOnStaleThreshold_StalenessEqualToThreshold() throws Exception { + threadPool = getThreadPool(); + Settings settings = Settings.builder().put(INDICES_REQUEST_CACHE_STALENESS_THRESHOLD_SETTING.getKey(), "0.5").build(); + cache = getIndicesRequestCache(settings); + writer.addDocument(newDoc(0, "foo")); + DirectoryReader reader = getReader(writer, indexShard.shardId()); + DirectoryReader secondReader = getReader(writer, indexShard.shardId()); + + // Get 2 entries into the cache + cache.getOrCompute(getEntity(indexShard), getLoader(reader), reader, getTermBytes()); + assertEquals(1, cache.count()); + + cache.getOrCompute(getEntity(indexShard), getLoader(secondReader), secondReader, getTermBytes()); assertEquals(2, cache.count()); // Close the reader, to be enqueued for cleanup - // 1 out of 2 keys ie 50% are now stale. reader.close(); + // 1 out of 2 keys ie 50% are now stale. + assertEquals(1, cache.cacheCleanupManager.getStaleKeysCount().get()); // cache count should not be affected assertEquals(2, cache.count()); // clean cache with 50% staleness threshold cache.cacheCleanupManager.cleanCache(); // cleanup should have taken effect + assertEquals(0, cache.cacheCleanupManager.getStaleKeysCount().get()); assertEquals(1, cache.count()); - IOUtils.close(secondReader, writer, dir, cache); - terminate(threadPool); + IOUtils.close(secondReader); } + // when a cache entry that is Stale is evicted for any reason, we have to deduct the count from our staleness count public void testStaleCount_OnRemovalNotificationOfStaleKey_DecrementsStaleCount() throws Exception { - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexShard indexShard = createIndex("test").getShard(0); - ThreadPool threadPool = getThreadPool(); + threadPool = getThreadPool(); Settings settings = Settings.builder().put(INDICES_REQUEST_CACHE_STALENESS_THRESHOLD_SETTING.getKey(), "0.51").build(); - IndicesRequestCache cache = new IndicesRequestCache(settings, (shardId -> { - IndexService indexService = null; - try { - indexService = indicesService.indexServiceSafe(shardId.getIndex()); - } catch (IndexNotFoundException ex) { - return Optional.empty(); - } - return Optional.of(new IndicesService.IndexShardCacheEntity(indexService.getShard(shardId.id()))); - }), - new CacheModule(new ArrayList<>(), settings).getCacheService(), - threadPool, - ClusterServiceUtils.createClusterService(threadPool) - ); - Directory dir = newDirectory(); - IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); - + cache = getIndicesRequestCache(settings); writer.addDocument(newDoc(0, "foo")); - DirectoryReader reader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); - TermQueryBuilder termQuery = new TermQueryBuilder("id", "0"); - BytesReference termBytes = 
XContentHelper.toXContent(termQuery, MediaTypeRegistry.JSON, false); - if (randomBoolean()) { - writer.flush(); - IOUtils.close(writer); - writer = new IndexWriter(dir, newIndexWriterConfig()); - } - writer.updateDocument(new Term("id", "0"), newDoc(0, "bar")); - DirectoryReader secondReader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); - - // Get 2 entries into the cache - IndicesService.IndexShardCacheEntity entity = new IndicesService.IndexShardCacheEntity(indexShard); - Loader loader = new Loader(reader, 0); - cache.getOrCompute(entity, loader, reader, termBytes); - - entity = new IndicesService.IndexShardCacheEntity(indexShard); - loader = new Loader(reader, 0); - cache.getOrCompute(entity, loader, reader, termBytes); + ShardId shardId = indexShard.shardId(); + DirectoryReader reader = getReader(writer, indexShard.shardId()); + DirectoryReader secondReader = getReader(writer, indexShard.shardId()); - IndicesService.IndexShardCacheEntity secondEntity = new IndicesService.IndexShardCacheEntity(indexShard); - loader = new Loader(secondReader, 0); - cache.getOrCompute(entity, loader, secondReader, termBytes); + // Get 2 entries into the cache from 2 different readers + cache.getOrCompute(getEntity(indexShard), getLoader(reader), reader, getTermBytes()); + assertEquals(1, cache.count()); - secondEntity = new IndicesService.IndexShardCacheEntity(indexShard); - loader = new Loader(secondReader, 0); - cache.getOrCompute(secondEntity, loader, secondReader, termBytes); + cache.getOrCompute(getEntity(indexShard), getLoader(secondReader), secondReader, getTermBytes()); assertEquals(2, cache.count()); - // Close the reader, to be enqueued for cleanup + // assert no stale keys are accounted so far + assertEquals(0, cache.cacheCleanupManager.getStaleKeysCount().get()); + // Close the reader, this should create a stale key reader.close(); - AtomicInteger staleKeysCount = cache.cacheCleanupManager.getStaleKeysCount(); // 1 out of 2 keys ie 50% are now stale. 
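// Closing the first reader migrates its per-reader entry count out of cleanupKeyToCountMap
// and into staleKeysCount in one step, which is why exactly one stale key is asserted below.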
- assertEquals(1, staleKeysCount.get()); + assertEquals(1, cache.cacheCleanupManager.getStaleKeysCount().get()); // cache count should not be affected assertEquals(2, cache.count()); - OpenSearchDirectoryReader.DelegatingCacheHelper delegatingCacheHelper = - (OpenSearchDirectoryReader.DelegatingCacheHelper) secondReader.getReaderCacheHelper(); - String readerCacheKeyId = delegatingCacheHelper.getDelegatingCacheKey().getId(); - IndicesRequestCache.Key key = new IndicesRequestCache.Key( - ((IndexShard) secondEntity.getCacheIdentity()).shardId(), - termBytes, - readerCacheKeyId - ); + IndicesRequestCache.Key key = new IndicesRequestCache.Key(indexShard.shardId(), getTermBytes(), getReaderCacheKeyId(reader)); + // test the mapping + ConcurrentMap> cleanupKeyToCountMap = cache.cacheCleanupManager.getCleanupKeyToCountMap(); + // shard id should exist + assertTrue(cleanupKeyToCountMap.containsKey(shardId)); + // reader CacheKeyId should NOT exist + assertFalse(cleanupKeyToCountMap.get(shardId).containsKey(getReaderCacheKeyId(reader))); + // secondReader CacheKeyId should exist + assertTrue(cleanupKeyToCountMap.get(shardId).containsKey(getReaderCacheKeyId(secondReader))); cache.onRemoval( new RemovalNotification, BytesReference>( new ICacheKey<>(key), - termBytes, + getTermBytes(), RemovalReason.EVICTED ) ); - staleKeysCount = cache.cacheCleanupManager.getStaleKeysCount(); + + // test the mapping, it should stay the same + // shard id should exist + assertTrue(cleanupKeyToCountMap.containsKey(shardId)); + // reader CacheKeyId should NOT exist + assertFalse(cleanupKeyToCountMap.get(shardId).containsKey(getReaderCacheKeyId(reader))); + // secondReader CacheKeyId should exist + assertTrue(cleanupKeyToCountMap.get(shardId).containsKey(getReaderCacheKeyId(secondReader))); // eviction of previous stale key from the cache should decrement staleKeysCount in iRC - assertEquals(0, staleKeysCount.get()); + assertEquals(0, cache.cacheCleanupManager.getStaleKeysCount().get()); - IOUtils.close(secondReader, writer, dir, cache); - terminate(threadPool); + IOUtils.close(secondReader); } - public void testStaleCount_OnRemovalNotificationOfStaleKey_DoesNotDecrementsStaleCount() throws Exception { - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexShard indexShard = createIndex("test").getShard(0); - ThreadPool threadPool = getThreadPool(); + // when a cache entry that is NOT Stale is evicted for any reason, staleness count should NOT be deducted + public void testStaleCount_OnRemovalNotificationOfNonStaleKey_DoesNotDecrementsStaleCount() throws Exception { + threadPool = getThreadPool(); Settings settings = Settings.builder().put(INDICES_REQUEST_CACHE_STALENESS_THRESHOLD_SETTING.getKey(), "0.51").build(); - IndicesRequestCache cache = new IndicesRequestCache(settings, (shardId -> { - IndexService indexService = null; - try { - indexService = indicesService.indexServiceSafe(shardId.getIndex()); - } catch (IndexNotFoundException ex) { - return Optional.empty(); - } - return Optional.of(new IndicesService.IndexShardCacheEntity(indexService.getShard(shardId.id()))); - }), - new CacheModule(new ArrayList<>(), settings).getCacheService(), - threadPool, - ClusterServiceUtils.createClusterService(threadPool) - ); - Directory dir = newDirectory(); - IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); - + cache = getIndicesRequestCache(settings); writer.addDocument(newDoc(0, "foo")); - DirectoryReader reader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new 
ShardId("foo", "bar", 1)); - TermQueryBuilder termQuery = new TermQueryBuilder("id", "0"); - BytesReference termBytes = XContentHelper.toXContent(termQuery, MediaTypeRegistry.JSON, false); - if (randomBoolean()) { - writer.flush(); - IOUtils.close(writer); - writer = new IndexWriter(dir, newIndexWriterConfig()); - } - writer.updateDocument(new Term("id", "0"), newDoc(0, "bar")); - DirectoryReader secondReader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); + ShardId shardId = indexShard.shardId(); + DirectoryReader reader = getReader(writer, indexShard.shardId()); + DirectoryReader secondReader = getReader(writer, indexShard.shardId()); // Get 2 entries into the cache - IndicesService.IndexShardCacheEntity entity = new IndicesService.IndexShardCacheEntity(indexShard); - Loader loader = new Loader(reader, 0); - cache.getOrCompute(entity, loader, reader, termBytes); - - entity = new IndicesService.IndexShardCacheEntity(indexShard); - loader = new Loader(reader, 0); - cache.getOrCompute(entity, loader, reader, termBytes); - - IndicesService.IndexShardCacheEntity secondEntity = new IndicesService.IndexShardCacheEntity(indexShard); - loader = new Loader(secondReader, 0); - cache.getOrCompute(entity, loader, secondReader, termBytes); + cache.getOrCompute(getEntity(indexShard), getLoader(reader), reader, getTermBytes()); + assertEquals(1, cache.count()); - secondEntity = new IndicesService.IndexShardCacheEntity(indexShard); - loader = new Loader(secondReader, 0); - cache.getOrCompute(secondEntity, loader, secondReader, termBytes); + cache.getOrCompute(getEntity(indexShard), getLoader(secondReader), secondReader, getTermBytes()); assertEquals(2, cache.count()); // Close the reader, to be enqueued for cleanup @@ -632,102 +513,251 @@ public void testStaleCount_OnRemovalNotificationOfStaleKey_DoesNotDecrementsStal // cache count should not be affected assertEquals(2, cache.count()); - OpenSearchDirectoryReader.DelegatingCacheHelper delegatingCacheHelper = (OpenSearchDirectoryReader.DelegatingCacheHelper) reader - .getReaderCacheHelper(); - String readerCacheKeyId = delegatingCacheHelper.getDelegatingCacheKey().getId(); - IndicesRequestCache.Key key = new IndicesRequestCache.Key( - ((IndexShard) secondEntity.getCacheIdentity()).shardId(), - termBytes, - readerCacheKeyId - ); + // evict entry from second reader (this reader is not closed) + IndicesRequestCache.Key key = new IndicesRequestCache.Key(indexShard.shardId(), getTermBytes(), getReaderCacheKeyId(secondReader)); + + // test the mapping + ConcurrentMap> cleanupKeyToCountMap = cache.cacheCleanupManager.getCleanupKeyToCountMap(); + // shard id should exist + assertTrue(cleanupKeyToCountMap.containsKey(shardId)); + // reader CacheKeyId should NOT exist + assertFalse(cleanupKeyToCountMap.get(shardId).containsKey(getReaderCacheKeyId(reader))); + // secondReader CacheKeyId should exist + assertTrue(cleanupKeyToCountMap.get(shardId).containsKey(getReaderCacheKeyId(secondReader))); cache.onRemoval( new RemovalNotification, BytesReference>( new ICacheKey<>(key), - termBytes, + getTermBytes(), RemovalReason.EVICTED ) ); + + // test the mapping, shardId entry should be cleaned up + // shard id should NOT exist + assertFalse(cleanupKeyToCountMap.containsKey(shardId)); + staleKeysCount = cache.cacheCleanupManager.getStaleKeysCount(); // eviction of NON-stale key from the cache should NOT decrement staleKeysCount in iRC assertEquals(1, staleKeysCount.get()); - IOUtils.close(secondReader, writer, dir, cache); - 
terminate(threadPool); + IOUtils.close(secondReader); } - public void testCacheCleanupBasedOnStaleThreshold_StalenessGreaterThanThreshold() throws Exception { - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexShard indexShard = createIndex("test").getShard(0); - ThreadPool threadPool = getThreadPool(); - Settings settings = Settings.builder().put(INDICES_REQUEST_CACHE_STALENESS_THRESHOLD_SETTING.getKey(), "0.49").build(); - IndicesRequestCache cache = new IndicesRequestCache(settings, (shardId -> { - IndexService indexService = null; - try { - indexService = indicesService.indexServiceSafe(shardId.getIndex()); - } catch (IndexNotFoundException ex) { - return Optional.empty(); - } - return Optional.of(new IndicesService.IndexShardCacheEntity(indexService.getShard(shardId.id()))); - }), - new CacheModule(new ArrayList<>(), settings).getCacheService(), - threadPool, - ClusterServiceUtils.createClusterService(threadPool) + // when a cache entry that is NOT Stale is evicted WITHOUT its reader closing, we should NOT deduct it from staleness count + public void testStaleCount_WithoutReaderClosing_DecrementsStaleCount() throws Exception { + threadPool = getThreadPool(); + Settings settings = Settings.builder().put(INDICES_REQUEST_CACHE_STALENESS_THRESHOLD_SETTING.getKey(), "0.51").build(); + cache = getIndicesRequestCache(settings); + + writer.addDocument(newDoc(0, "foo")); + DirectoryReader reader = getReader(writer, indexShard.shardId()); + DirectoryReader secondReader = getReader(writer, indexShard.shardId()); + + // Get 2 entries into the cache from 2 different readers + cache.getOrCompute(getEntity(indexShard), getLoader(reader), reader, getTermBytes()); + assertEquals(1, cache.count()); + + cache.getOrCompute(getEntity(indexShard), getLoader(secondReader), secondReader, getTermBytes()); + assertEquals(2, cache.count()); + + // no keys are stale + assertEquals(0, cache.cacheCleanupManager.getStaleKeysCount().get()); + // create notification for removal of non-stale entry + IndicesRequestCache.Key key = new IndicesRequestCache.Key(indexShard.shardId(), getTermBytes(), getReaderCacheKeyId(reader)); + cache.onRemoval( + new RemovalNotification, BytesReference>( + new ICacheKey<>(key), + getTermBytes(), + RemovalReason.EVICTED + ) ); - Directory dir = newDirectory(); - IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); + // stale keys count should stay zero + assertEquals(0, cache.cacheCleanupManager.getStaleKeysCount().get()); + + IOUtils.close(reader, secondReader); + } + + // test staleness count based on removal notifications + public void testStaleCount_OnRemovalNotifications() throws Exception { + threadPool = getThreadPool(); + Settings settings = Settings.builder().put(INDICES_REQUEST_CACHE_STALENESS_THRESHOLD_SETTING.getKey(), "0.51").build(); + cache = getIndicesRequestCache(settings); writer.addDocument(newDoc(0, "foo")); - DirectoryReader reader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); - TermQueryBuilder termQuery = new TermQueryBuilder("id", "0"); - BytesReference termBytes = XContentHelper.toXContent(termQuery, MediaTypeRegistry.JSON, false); - if (randomBoolean()) { - writer.flush(); - IOUtils.close(writer); - writer = new IndexWriter(dir, newIndexWriterConfig()); + DirectoryReader reader = getReader(writer, indexShard.shardId()); + + // Get 5 entries into the cache + int totalKeys = 5; + IndicesService.IndexShardCacheEntity entity = null; + TermQueryBuilder termQuery = null; + 
BytesReference termBytes = null; + for (int i = 1; i <= totalKeys; i++) { + termQuery = new TermQueryBuilder("id", "" + i); + termBytes = XContentHelper.toXContent(termQuery, MediaTypeRegistry.JSON, false); + entity = new IndicesService.IndexShardCacheEntity(indexShard); + Loader loader = new Loader(reader, 0); + cache.getOrCompute(entity, loader, reader, termBytes); + assertEquals(i, cache.count()); } - writer.updateDocument(new Term("id", "0"), newDoc(0, "bar")); - DirectoryReader secondReader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); + // no keys are stale yet + assertEquals(0, cache.cacheCleanupManager.getStaleKeysCount().get()); + // closing the reader should make all keys stale + reader.close(); + assertEquals(totalKeys, cache.cacheCleanupManager.getStaleKeysCount().get()); - // Get 2 entries into the cache - IndicesService.IndexShardCacheEntity entity = new IndicesService.IndexShardCacheEntity(indexShard); - Loader loader = new Loader(reader, 0); - cache.getOrCompute(entity, loader, reader, termBytes); + String readerCacheKeyId = getReaderCacheKeyId(reader); + IndicesRequestCache.Key key = new IndicesRequestCache.Key( + ((IndexShard) entity.getCacheIdentity()).shardId(), + termBytes, + readerCacheKeyId + ); - entity = new IndicesService.IndexShardCacheEntity(indexShard); - loader = new Loader(reader, 0); - cache.getOrCompute(entity, loader, reader, termBytes); + int staleCount = cache.cacheCleanupManager.getStaleKeysCount().get(); + // Notification for Replaced should not deduct the staleCount + cache.onRemoval( + new RemovalNotification, BytesReference>( + new ICacheKey<>(key), + getTermBytes(), + RemovalReason.REPLACED + ) + ); + // stale keys count should stay the same + assertEquals(staleCount, cache.cacheCleanupManager.getStaleKeysCount().get()); + + // Notification for all but Replaced should deduct the staleCount + RemovalReason[] reasons = { RemovalReason.INVALIDATED, RemovalReason.EVICTED, RemovalReason.EXPLICIT, RemovalReason.CAPACITY }; + for (RemovalReason reason : reasons) { + cache.onRemoval( + new RemovalNotification, BytesReference>(new ICacheKey<>(key), getTermBytes(), reason) + ); + assertEquals(--staleCount, cache.cacheCleanupManager.getStaleKeysCount().get()); + } + } - IndicesService.IndexShardCacheEntity secondEntity = new IndicesService.IndexShardCacheEntity(indexShard); - loader = new Loader(secondReader, 0); - cache.getOrCompute(entity, loader, secondReader, termBytes); + // when staleness count less than the stale threshold, stale keys should NOT be cleaned up. 
+ public void testCacheCleanupBasedOnStaleThreshold_StalenessLesserThanThreshold() throws Exception { + threadPool = getThreadPool(); + Settings settings = Settings.builder().put(INDICES_REQUEST_CACHE_STALENESS_THRESHOLD_SETTING.getKey(), "51%").build(); + cache = getIndicesRequestCache(settings); - secondEntity = new IndicesService.IndexShardCacheEntity(indexShard); - loader = new Loader(secondReader, 0); - cache.getOrCompute(secondEntity, loader, secondReader, termBytes); + writer.addDocument(newDoc(0, "foo")); + DirectoryReader reader = getReader(writer, indexShard.shardId()); + DirectoryReader secondReader = getReader(writer, indexShard.shardId()); + + // Get 2 entries into the cache + cache.getOrCompute(getEntity(indexShard), getLoader(reader), reader, getTermBytes()); + assertEquals(1, cache.count()); + + cache.getOrCompute(getEntity(indexShard), getLoader(secondReader), secondReader, getTermBytes()); assertEquals(2, cache.count()); // Close the reader, to be enqueued for cleanup - // 1 out of 2 keys ie 50% are now stale. reader.close(); + // 1 out of 2 keys ie 50% are now stale. + assertEquals(1, cache.cacheCleanupManager.getStaleKeysCount().get()); // cache count should not be affected assertEquals(2, cache.count()); - // clean cache with 49% staleness threshold + // clean cache with 51% staleness threshold cache.cacheCleanupManager.cleanCache(); - // cleanup should have taken effect with 49% threshold + // cleanup should have been ignored + assertEquals(1, cache.cacheCleanupManager.getStaleKeysCount().get()); + assertEquals(2, cache.count()); + + IOUtils.close(secondReader); + } + + // test the cleanupKeyToCountMap are set appropriately when both readers are closed + public void testCleanupKeyToCountMapAreSetAppropriately() throws Exception { + threadPool = getThreadPool(); + Settings settings = Settings.builder().put(INDICES_REQUEST_CACHE_STALENESS_THRESHOLD_SETTING.getKey(), "0.51").build(); + cache = getIndicesRequestCache(settings); + + writer.addDocument(newDoc(0, "foo")); + ShardId shardId = indexShard.shardId(); + DirectoryReader reader = getReader(writer, shardId); + DirectoryReader secondReader = getReader(writer, shardId); + + // Get 2 entries into the cache from 2 different readers + cache.getOrCompute(getEntity(indexShard), getLoader(reader), reader, getTermBytes()); assertEquals(1, cache.count()); + // test the mappings + ConcurrentMap> cleanupKeyToCountMap = cache.cacheCleanupManager.getCleanupKeyToCountMap(); + assertEquals(1, (int) cleanupKeyToCountMap.get(shardId).get(getReaderCacheKeyId(reader))); - IOUtils.close(secondReader, writer, dir, cache); - terminate(threadPool); + cache.getOrCompute(getEntity(indexShard), getLoader(secondReader), secondReader, getTermBytes()); + // test the mapping + assertEquals(2, cache.count()); + assertEquals(1, (int) cleanupKeyToCountMap.get(shardId).get(getReaderCacheKeyId(secondReader))); + // create another entry for the second reader + cache.getOrCompute(getEntity(indexShard), getLoader(secondReader), secondReader, getTermBytes("id", "1")); + // test the mapping + assertEquals(3, cache.count()); + assertEquals(2, (int) cleanupKeyToCountMap.get(shardId).get(getReaderCacheKeyId(secondReader))); + + // Close the reader, to create stale entries + reader.close(); + // cache count should not be affected + assertEquals(3, cache.count()); + // test the mapping, first reader's entry should be removed from the mapping and accounted for in the staleKeysCount + 
assertFalse(cleanupKeyToCountMap.get(shardId).containsKey(getReaderCacheKeyId(reader)));
+        assertEquals(1, cache.cacheCleanupManager.getStaleKeysCount().get());
+        // second reader's mapping should not be affected
+        assertEquals(2, (int) cleanupKeyToCountMap.get(shardId).get(getReaderCacheKeyId(secondReader)));
+        // send removal notification for first reader
+        IndicesRequestCache.Key key = new IndicesRequestCache.Key(indexShard.shardId(), getTermBytes(), getReaderCacheKeyId(reader));
+        cache.onRemoval(
+            new RemovalNotification<ICacheKey<IndicesRequestCache.Key>, BytesReference>(
+                new ICacheKey<>(key),
+                getTermBytes(),
+                RemovalReason.EVICTED
+            )
+        );
+        // test the mapping, it should stay the same
+        assertFalse(cleanupKeyToCountMap.get(shardId).containsKey(getReaderCacheKeyId(reader)));
+        // staleKeysCount should be decremented
+        assertEquals(0, cache.cacheCleanupManager.getStaleKeysCount().get());
+        // second reader's mapping should not be affected
+        assertEquals(2, (int) cleanupKeyToCountMap.get(shardId).get(getReaderCacheKeyId(secondReader)));
+
+        // Without closing the secondReader, send a removal notification for one of its keys
+        key = new IndicesRequestCache.Key(indexShard.shardId(), getTermBytes(), getReaderCacheKeyId(secondReader));
+        cache.onRemoval(
+            new RemovalNotification<ICacheKey<IndicesRequestCache.Key>, BytesReference>(
+                new ICacheKey<>(key),
+                getTermBytes(),
+                RemovalReason.EVICTED
+            )
+        );
+        // staleKeysCount should be the same as before
+        assertEquals(0, cache.cacheCleanupManager.getStaleKeysCount().get());
+        // secondReader's readerCacheKeyId count should be decremented by 1
+        assertEquals(1, (int) cleanupKeyToCountMap.get(shardId).get(getReaderCacheKeyId(secondReader)));
+        // Without closing the secondReader, send a removal notification for its last key
+        key = new IndicesRequestCache.Key(indexShard.shardId(), getTermBytes(), getReaderCacheKeyId(secondReader));
+        cache.onRemoval(
+            new RemovalNotification<ICacheKey<IndicesRequestCache.Key>, BytesReference>(
+                new ICacheKey<>(key),
+                getTermBytes(),
+                RemovalReason.EVICTED
+            )
+        );
+        // staleKeysCount should be the same as before
+        assertEquals(0, cache.cacheCleanupManager.getStaleKeysCount().get());
+        // since all the readers of this shard are closed, the cleanupKeyToCountMap should have no entries
+        assertEquals(0, cleanupKeyToCountMap.size());
+
+        IOUtils.close(secondReader);
     }
-    public void testCacheCleanupBasedOnStaleThreshold_StalenessLesserThanThreshold() throws Exception {
+    private DirectoryReader getReader(IndexWriter writer, ShardId shardId) throws IOException {
+        return OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), shardId);
+    }
+
+    private IndicesRequestCache getIndicesRequestCache(Settings settings) {
         IndicesService indicesService = getInstanceFromNode(IndicesService.class);
-        IndexShard indexShard = createIndex("test").getShard(0);
-        ThreadPool threadPool = getThreadPool();
-        Settings settings = Settings.builder().put(INDICES_REQUEST_CACHE_STALENESS_THRESHOLD_SETTING.getKey(), "51%").build();
-        IndicesRequestCache cache = new IndicesRequestCache(settings, (shardId -> {
+        return new IndicesRequestCache(settings, (shardId -> {
             IndexService indexService = null;
             try {
                 indexService = indicesService.indexServiceSafe(shardId.getIndex());
@@ -740,52 +770,30 @@ public void testCacheCleanupBasedOnStaleThreshold_StalenessLesserThanThreshold()
             threadPool,
             ClusterServiceUtils.createClusterService(threadPool)
         );
-        Directory dir = newDirectory();
-        IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig());
-
-        writer.addDocument(newDoc(0, "foo"));
-        DirectoryReader reader =
OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1));
-        TermQueryBuilder termQuery = new TermQueryBuilder("id", "0");
-        BytesReference termBytes = XContentHelper.toXContent(termQuery, MediaTypeRegistry.JSON, false);
-        if (randomBoolean()) {
-            writer.flush();
-            IOUtils.close(writer);
-            writer = new IndexWriter(dir, newIndexWriterConfig());
-        }
-        writer.updateDocument(new Term("id", "0"), newDoc(0, "bar"));
-        DirectoryReader secondReader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1));
-
-        // Get 2 entries into the cache
-        IndicesService.IndexShardCacheEntity entity = new IndicesService.IndexShardCacheEntity(indexShard);
-        Loader loader = new Loader(reader, 0);
-        cache.getOrCompute(entity, loader, reader, termBytes);
-
-        entity = new IndicesService.IndexShardCacheEntity(indexShard);
-        loader = new Loader(reader, 0);
-        cache.getOrCompute(entity, loader, reader, termBytes);
+    }
-        IndicesService.IndexShardCacheEntity secondEntity = new IndicesService.IndexShardCacheEntity(indexShard);
-        loader = new Loader(secondReader, 0);
-        cache.getOrCompute(entity, loader, secondReader, termBytes);
+    private Loader getLoader(DirectoryReader reader) {
+        return new Loader(reader, 0);
+    }
-        secondEntity = new IndicesService.IndexShardCacheEntity(indexShard);
-        loader = new Loader(secondReader, 0);
-        cache.getOrCompute(secondEntity, loader, secondReader, termBytes);
-        assertEquals(2, cache.count());
+    private IndicesService.IndexShardCacheEntity getEntity(IndexShard indexShard) {
+        return new IndicesService.IndexShardCacheEntity(indexShard);
+    }
-        // Close the reader, to be enqueued for cleanup
-        // 1 out of 2 keys ie 50% are now stale.
-        reader.close();
-        // cache count should not be affected
-        assertEquals(2, cache.count());
+    private BytesReference getTermBytes() throws IOException {
+        TermQueryBuilder termQuery = new TermQueryBuilder("id", "0");
+        return XContentHelper.toXContent(termQuery, MediaTypeRegistry.JSON, false);
+    }
-        // clean cache with 51% staleness threshold
-        cache.cacheCleanupManager.cleanCache();
-        // cleanup should have been ignored
-        assertEquals(2, cache.count());
+    private BytesReference getTermBytes(String fieldName, String value) throws IOException {
+        TermQueryBuilder termQuery = new TermQueryBuilder(fieldName, value);
+        return XContentHelper.toXContent(termQuery, MediaTypeRegistry.JSON, false);
+    }
-        IOUtils.close(secondReader, writer, dir, cache);
-        terminate(threadPool);
+    private String getReaderCacheKeyId(DirectoryReader reader) {
+        OpenSearchDirectoryReader.DelegatingCacheHelper delegatingCacheHelper = (OpenSearchDirectoryReader.DelegatingCacheHelper) reader
+            .getReaderCacheHelper();
+        return delegatingCacheHelper.getDelegatingCacheKey().getId();
     }
     public void testClosingIndexWipesStats() throws Exception {
@@ -795,6 +803,8 @@ public void testClosingIndexWipesStats() throws Exception {
         Settings indexSettings = Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards).build();
         String indexToKeepName = "test";
         String indexToCloseName = "test2";
+        // delete all indices if any already exist
+        assertAcked(client().admin().indices().prepareDelete("_all").get());
         IndexService indexToKeep = createIndex(indexToKeepName, indexSettings);
         IndexService indexToClose = createIndex(indexToCloseName, indexSettings);
         for (int i = 0; i < numShards; i++) {
             assertNotNull(indexToKeep.getShard(i));
assertNotNull(indexToClose.getShard(i)); } - ThreadPool threadPool = getThreadPool(); + threadPool = getThreadPool(); Settings settings = Settings.builder().put(INDICES_REQUEST_CACHE_STALENESS_THRESHOLD_SETTING.getKey(), "0.001%").build(); - IndicesRequestCache cache = new IndicesRequestCache(settings, (shardId -> { + cache = new IndicesRequestCache(settings, (shardId -> { IndexService indexService = null; try { indexService = indicesService.indexServiceSafe(shardId.getIndex()); @@ -821,8 +831,6 @@ public void testClosingIndexWipesStats() throws Exception { threadPool, ClusterServiceUtils.createClusterService(threadPool) ); - Directory dir = newDirectory(); - IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); writer.addDocument(newDoc(0, "foo")); TermQueryBuilder termQuery = new TermQueryBuilder("id", "0"); @@ -895,47 +903,27 @@ public void testClosingIndexWipesStats() throws Exception { for (DirectoryReader reader : readersToKeep) { IOUtils.close(reader); } - IOUtils.close(secondReader, writer, dir, cache); - terminate(threadPool); + IOUtils.close(secondReader); } public void testEviction() throws Exception { final ByteSizeValue size; { - IndexShard indexShard = createIndex("test").getShard(0); - ThreadPool threadPool = getThreadPool(); - IndicesRequestCache cache = new IndicesRequestCache( - Settings.EMPTY, - (shardId -> Optional.of(new IndicesService.IndexShardCacheEntity(indexShard))), - new CacheModule(new ArrayList<>(), Settings.EMPTY).getCacheService(), - threadPool, - ClusterServiceUtils.createClusterService(threadPool) - ); - Directory dir = newDirectory(); - IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); - + threadPool = getThreadPool(); + cache = getIndicesRequestCache(Settings.EMPTY); writer.addDocument(newDoc(0, "foo")); - DirectoryReader reader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); - TermQueryBuilder termQuery = new TermQueryBuilder("id", "0"); - BytesReference termBytes = XContentHelper.toXContent(termQuery, MediaTypeRegistry.JSON, false); - IndicesService.IndexShardCacheEntity entity = new IndicesService.IndexShardCacheEntity(indexShard); - Loader loader = new Loader(reader, 0); - + DirectoryReader reader = getReader(writer, indexShard.shardId()); writer.updateDocument(new Term("id", "0"), newDoc(0, "bar")); DirectoryReader secondReader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); - IndicesService.IndexShardCacheEntity secondEntity = new IndicesService.IndexShardCacheEntity(indexShard); - Loader secondLoader = new Loader(secondReader, 0); - BytesReference value1 = cache.getOrCompute(entity, loader, reader, termBytes); + BytesReference value1 = cache.getOrCompute(getEntity(indexShard), getLoader(reader), reader, getTermBytes()); assertEquals("foo", value1.streamInput().readString()); - BytesReference value2 = cache.getOrCompute(secondEntity, secondLoader, secondReader, termBytes); + BytesReference value2 = cache.getOrCompute(getEntity(indexShard), getLoader(secondReader), secondReader, getTermBytes()); assertEquals("bar", value2.streamInput().readString()); size = new ByteSizeValue(cache.getSizeInBytes()); IOUtils.close(reader, secondReader, writer, dir, cache); - terminate(threadPool); } - IndexShard indexShard = createIndex("test1").getShard(0); - ThreadPool threadPool = getThreadPool(); + indexShard = createIndex("test1").getShard(0); IndicesRequestCache cache = new IndicesRequestCache( // Add 5 instead of 1; the key size now depends 
on the length of dimension names and values so there's more variation Settings.builder().put(IndicesRequestCache.INDICES_CACHE_QUERY_SIZE.getKey(), size.getBytes() + 5 + "b").build(), @@ -944,83 +932,52 @@ public void testEviction() throws Exception { threadPool, ClusterServiceUtils.createClusterService(threadPool) ); - Directory dir = newDirectory(); - IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); - + dir = newDirectory(); + writer = new IndexWriter(dir, newIndexWriterConfig()); writer.addDocument(newDoc(0, "foo")); - DirectoryReader reader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); - TermQueryBuilder termQuery = new TermQueryBuilder("id", "0"); - BytesReference termBytes = XContentHelper.toXContent(termQuery, MediaTypeRegistry.JSON, false); - IndicesService.IndexShardCacheEntity entity = new IndicesService.IndexShardCacheEntity(indexShard); - Loader loader = new Loader(reader, 0); - + DirectoryReader reader = getReader(writer, indexShard.shardId()); writer.updateDocument(new Term("id", "0"), newDoc(0, "bar")); - DirectoryReader secondReader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); - IndicesService.IndexShardCacheEntity secondEntity = new IndicesService.IndexShardCacheEntity(indexShard); - Loader secondLoader = new Loader(secondReader, 0); - + DirectoryReader secondReader = getReader(writer, indexShard.shardId()); writer.updateDocument(new Term("id", "0"), newDoc(0, "baz")); DirectoryReader thirdReader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); - IndicesService.IndexShardCacheEntity thirddEntity = new IndicesService.IndexShardCacheEntity(indexShard); - Loader thirdLoader = new Loader(thirdReader, 0); - BytesReference value1 = cache.getOrCompute(entity, loader, reader, termBytes); + BytesReference value1 = cache.getOrCompute(getEntity(indexShard), getLoader(reader), reader, getTermBytes()); assertEquals("foo", value1.streamInput().readString()); - BytesReference value2 = cache.getOrCompute(secondEntity, secondLoader, secondReader, termBytes); + BytesReference value2 = cache.getOrCompute(getEntity(indexShard), getLoader(secondReader), secondReader, getTermBytes()); assertEquals("bar", value2.streamInput().readString()); logger.info("Memory size: {}", indexShard.requestCache().stats().getMemorySize()); - BytesReference value3 = cache.getOrCompute(thirddEntity, thirdLoader, thirdReader, termBytes); + BytesReference value3 = cache.getOrCompute(getEntity(indexShard), getLoader(thirdReader), thirdReader, getTermBytes()); assertEquals("baz", value3.streamInput().readString()); assertEquals(2, cache.count()); assertEquals(1, indexShard.requestCache().stats().getEvictions()); - IOUtils.close(reader, secondReader, thirdReader, writer, dir, cache); - terminate(threadPool); + IOUtils.close(reader, secondReader, thirdReader); } public void testClearAllEntityIdentity() throws Exception { - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexShard indexShard = createIndex("test").getShard(0); - ThreadPool threadPool = getThreadPool(); - IndicesRequestCache cache = new IndicesRequestCache(Settings.EMPTY, (shardId -> { - IndexService indexService = null; - try { - indexService = indicesService.indexServiceSafe(shardId.getIndex()); - } catch (IndexNotFoundException ex) { - return Optional.empty(); - } - return Optional.of(new IndicesService.IndexShardCacheEntity(indexService.getShard(shardId.id()))); - 
}),
-            new CacheModule(new ArrayList<>(), Settings.EMPTY).getCacheService(),
-            threadPool,
-            ClusterServiceUtils.createClusterService(threadPool)
-        );
-
-        Directory dir = newDirectory();
-        IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig());
-
+        threadPool = getThreadPool();
+        cache = getIndicesRequestCache(Settings.EMPTY);
         writer.addDocument(newDoc(0, "foo"));
-        DirectoryReader reader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1));
-        TermQueryBuilder termQuery = new TermQueryBuilder("id", "0");
-        BytesReference termBytes = XContentHelper.toXContent(termQuery, MediaTypeRegistry.JSON, false);
+        DirectoryReader reader = getReader(writer, indexShard.shardId());
         IndicesService.IndexShardCacheEntity entity = new IndicesService.IndexShardCacheEntity(indexShard);
         Loader loader = new Loader(reader, 0);
         writer.updateDocument(new Term("id", "0"), newDoc(0, "bar"));
-        DirectoryReader secondReader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1));
+        DirectoryReader secondReader = getReader(writer, indexShard.shardId());
         IndicesService.IndexShardCacheEntity secondEntity = new IndicesService.IndexShardCacheEntity(indexShard);
         Loader secondLoader = new Loader(secondReader, 0);
         writer.updateDocument(new Term("id", "0"), newDoc(0, "baz"));
-        DirectoryReader thirdReader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1));
+        DirectoryReader thirdReader = getReader(writer, indexShard.shardId());
         IndicesService.IndexShardCacheEntity thirddEntity = new IndicesService.IndexShardCacheEntity(createIndex("test1").getShard(0));
         Loader thirdLoader = new Loader(thirdReader, 0);
-        BytesReference value1 = cache.getOrCompute(entity, loader, reader, termBytes);
+        BytesReference value1 = cache.getOrCompute(entity, loader, reader, getTermBytes());
         assertEquals("foo", value1.streamInput().readString());
-        BytesReference value2 = cache.getOrCompute(secondEntity, secondLoader, secondReader, termBytes);
+        BytesReference value2 = cache.getOrCompute(secondEntity, secondLoader, secondReader, getTermBytes());
         assertEquals("bar", value2.streamInput().readString());
         logger.info("Memory size: {}", indexShard.requestCache().stats().getMemorySize());
-        BytesReference value3 = cache.getOrCompute(thirddEntity, thirdLoader, thirdReader, termBytes);
+        BytesReference value3 = cache.getOrCompute(thirddEntity, thirdLoader, thirdReader, getTermBytes());
         assertEquals("baz", value3.streamInput().readString());
         assertEquals(3, cache.count());
         RequestCacheStats requestCacheStats = entity.stats().stats();
@@ -1031,14 +988,13 @@ public void testClearAllEntityIdentity() throws Exception {
         cache.cacheCleanupManager.cleanCache();
         assertEquals(1, cache.count());
         // third has not been validated since it's a different identity
-        value3 = cache.getOrCompute(thirddEntity, thirdLoader, thirdReader, termBytes);
+        value3 = cache.getOrCompute(thirddEntity, thirdLoader, thirdReader, getTermBytes());
         requestCacheStats = entity.stats().stats();
         requestCacheStats.add(thirddEntity.stats().stats());
         assertEquals(hitCount + 1, requestCacheStats.getHitCount());
         assertEquals("baz", value3.streamInput().readString());
-        IOUtils.close(reader, secondReader, thirdReader, writer, dir, cache);
-        terminate(threadPool);
+        IOUtils.close(reader, secondReader, thirdReader);
     }
     public Iterable<IndexableField> newDoc(int id, String value) {
@@ -1050,7 +1006,7 @@ public Iterable<IndexableField> newDoc(int id, String value) {
     private static class Loader implements CheckedSupplier<BytesReference, IOException> {
-        private final DirectoryReader reader;
+        final DirectoryReader reader;
         private final int id;
         public boolean loadedFromCache = true;
@@ -1074,38 +1030,18 @@ public BytesReference get() {
                 throw new RuntimeException(e);
             }
         }
-    }
     public void testInvalidate() throws Exception {
-        IndicesService indicesService = getInstanceFromNode(IndicesService.class);
-        IndexShard indexShard = createIndex("test").getShard(0);
-        ThreadPool threadPool = getThreadPool();
-        IndicesRequestCache cache = new IndicesRequestCache(Settings.EMPTY, (shardId -> {
-            IndexService indexService = null;
-            try {
-                indexService = indicesService.indexServiceSafe(shardId.getIndex());
-            } catch (IndexNotFoundException ex) {
-                return Optional.empty();
-            }
-            return Optional.of(new IndicesService.IndexShardCacheEntity(indexService.getShard(shardId.id())));
-        }),
-            new CacheModule(new ArrayList<>(), Settings.EMPTY).getCacheService(),
-            threadPool,
-            ClusterServiceUtils.createClusterService(threadPool)
-        );
-        Directory dir = newDirectory();
-        IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig());
-
+        threadPool = getThreadPool();
+        IndicesRequestCache cache = getIndicesRequestCache(Settings.EMPTY);
         writer.addDocument(newDoc(0, "foo"));
-        DirectoryReader reader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1));
-        TermQueryBuilder termQuery = new TermQueryBuilder("id", "0");
-        BytesReference termBytes = XContentHelper.toXContent(termQuery, MediaTypeRegistry.JSON, false);
+        DirectoryReader reader = getReader(writer, indexShard.shardId());
         // initial cache
         IndicesService.IndexShardCacheEntity entity = new IndicesService.IndexShardCacheEntity(indexShard);
         Loader loader = new Loader(reader, 0);
-        BytesReference value = cache.getOrCompute(entity, loader, reader, termBytes);
+        BytesReference value = cache.getOrCompute(entity, loader, reader, getTermBytes());
         assertEquals("foo", value.streamInput().readString());
         ShardRequestCache requestCacheStats = entity.stats();
         assertEquals(0, requestCacheStats.stats().getHitCount());
@@ -1117,7 +1053,7 @@ public void testInvalidate() throws Exception {
         // cache hit
         entity = new IndicesService.IndexShardCacheEntity(indexShard);
         loader = new Loader(reader, 0);
-        value = cache.getOrCompute(entity, loader, reader, termBytes);
+        value = cache.getOrCompute(entity, loader, reader, getTermBytes());
         assertEquals("foo", value.streamInput().readString());
         requestCacheStats = entity.stats();
         assertEquals(1, requestCacheStats.stats().getHitCount());
@@ -1131,8 +1067,8 @@ public void testInvalidate() throws Exception {
         // load again after invalidate
         entity = new IndicesService.IndexShardCacheEntity(indexShard);
         loader = new Loader(reader, 0);
-        cache.invalidate(entity, reader, termBytes);
-        value = cache.getOrCompute(entity, loader, reader, termBytes);
+        cache.invalidate(entity, reader, getTermBytes());
+        value = cache.getOrCompute(entity, loader, reader, getTermBytes());
         assertEquals("foo", value.streamInput().readString());
         requestCacheStats = entity.stats();
         assertEquals(1, requestCacheStats.stats().getHitCount());
@@ -1157,16 +1093,11 @@ public void testInvalidate() throws Exception {
         assertEquals(0, cache.count());
         assertEquals(0, requestCacheStats.stats().getMemorySize().bytesAsInt());
-        IOUtils.close(reader, writer, dir, cache);
-        terminate(threadPool);
+        IOUtils.close(reader);
         assertEquals(0, cache.numRegisteredCloseListeners());
     }
     public void testEqualsKey() throws IOException {
-        IndicesService indicesService =
getInstanceFromNode(IndicesService.class); - Directory dir = newDirectory(); - IndexWriterConfig config = newIndexWriterConfig(); - IndexWriter writer = new IndexWriter(dir, config); ShardId shardId = new ShardId("foo", "bar", 1); ShardId shardId1 = new ShardId("foo1", "bar1", 2); IndexReader reader1 = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), shardId); @@ -1193,13 +1124,9 @@ public void testEqualsKey() throws IOException { } public void testSerializationDeserializationOfCacheKey() throws Exception { - TermQueryBuilder termQuery = new TermQueryBuilder("id", "0"); - BytesReference termBytes = XContentHelper.toXContent(termQuery, MediaTypeRegistry.JSON, false); - IndexService indexService = createIndex("test"); - IndexShard indexShard = indexService.getShard(0); IndicesService.IndexShardCacheEntity shardCacheEntity = new IndicesService.IndexShardCacheEntity(indexShard); String readerCacheKeyId = UUID.randomUUID().toString(); - IndicesRequestCache.Key key1 = new IndicesRequestCache.Key(indexShard.shardId(), termBytes, readerCacheKeyId); + IndicesRequestCache.Key key1 = new IndicesRequestCache.Key(indexShard.shardId(), getTermBytes(), readerCacheKeyId); BytesReference bytesReference = null; try (BytesStreamOutput out = new BytesStreamOutput()) { key1.writeTo(out); @@ -1211,8 +1138,7 @@ public void testSerializationDeserializationOfCacheKey() throws Exception { assertEquals(readerCacheKeyId, key2.readerCacheKeyId); assertEquals(((IndexShard) shardCacheEntity.getCacheIdentity()).shardId(), key2.shardId); - assertEquals(termBytes, key2.value); - + assertEquals(getTermBytes(), key2.value); } private class TestBytesReference extends AbstractBytesReference { From c055a3d6b7721b983b245c22f3bba335a547b2a3 Mon Sep 17 00:00:00 2001 From: Pranshu Shukla <55992439+Pranshu-S@users.noreply.github.com> Date: Thu, 25 Apr 2024 06:18:53 +0530 Subject: [PATCH 33/37] Fix Flaky Test PersistentTasksExecutorFullRestartIT.testFullClusterRestart (#13350) Signed-off-by: Pranshu Shukla --- .../persistent/PersistentTasksExecutorFullRestartIT.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/persistent/PersistentTasksExecutorFullRestartIT.java b/server/src/internalClusterTest/java/org/opensearch/persistent/PersistentTasksExecutorFullRestartIT.java index 708388b3328f0..151a207d2c191 100644 --- a/server/src/internalClusterTest/java/org/opensearch/persistent/PersistentTasksExecutorFullRestartIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/persistent/PersistentTasksExecutorFullRestartIT.java @@ -43,6 +43,7 @@ import java.util.Collection; import java.util.Collections; import java.util.List; +import java.util.concurrent.TimeUnit; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; @@ -129,7 +130,7 @@ public void testFullClusterRestart() throws Exception { .custom(PersistentTasksCustomMetadata.TYPE)).tasks(), empty() ); - }); + }, 20, TimeUnit.SECONDS); } } From 9e9ab6b3c406be2dcc701934ff03c0e56911819a Mon Sep 17 00:00:00 2001 From: Shubh Sahu Date: Thu, 25 Apr 2024 07:19:52 +0530 Subject: [PATCH 34/37] [Remote Store] Add capability of doing flush as determined by the translog (#12992) Signed-off-by: Shubh Sahu --- CHANGELOG.md | 1 + .../opensearch/remotestore/RemoteStoreIT.java | 43 +++++++++++++++++++ .../common/settings/ClusterSettings.java | 3 +- .../opensearch/index/shard/IndexShard.java | 3 +- .../translog/InternalTranslogManager.java | 6 +++ 
.../index/translog/RemoteFsTranslog.java | 11 +++++ .../opensearch/index/translog/Translog.java | 9 ++++ .../transfer/TranslogTransferManager.java | 4 ++ .../indices/RemoteStoreSettings.java | 23 ++++++++++ ...RemoteStoreSettingsDynamicUpdateTests.java | 20 +++++++++ 10 files changed, 121 insertions(+), 2 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index b80741f24e275..f2fbf895dafcf 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -24,6 +24,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Add an individual setting of rate limiter for segment replication ([#12959](https://github.com/opensearch-project/OpenSearch/pull/12959))
 - [Streaming Indexing] Ensure support of the new transport by security plugin ([#13174](https://github.com/opensearch-project/OpenSearch/pull/13174))
 - Add cluster setting to dynamically configure the buckets for filter rewrite optimization. ([#13179](https://github.com/opensearch-project/OpenSearch/pull/13179))
+- [Remote Store] Add capability of doing flush as determined by the translog ([#12992](https://github.com/opensearch-project/OpenSearch/pull/12992))
 ### Dependencies
 - Bump `org.apache.commons:commons-configuration2` from 2.10.0 to 2.10.1 ([#12896](https://github.com/opensearch-project/OpenSearch/pull/12896))

diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java
index 78441f74f6b4f..ca0ae3ca9a700 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java
@@ -30,6 +30,7 @@ import org.opensearch.index.IndexSettings;
 import org.opensearch.index.shard.IndexShard;
 import org.opensearch.index.shard.IndexShardClosedException;
+import org.opensearch.index.translog.Translog;
 import org.opensearch.index.translog.Translog.Durability;
 import org.opensearch.indices.IndicesService;
 import org.opensearch.indices.RemoteStoreSettings;
@@ -63,6 +64,7 @@ import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.TRANSLOG;
 import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA;
 import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA;
+import static org.opensearch.index.shard.IndexShardTestCase.getTranslog;
 import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING;
 import static org.opensearch.test.OpenSearchTestCase.getShardLevelBlobPath;
 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
@@ -859,4 +861,45 @@ public void testLocalOnlyTranslogCleanupOnNodeRestart() throws Exception {
         refresh(INDEX_NAME);
         assertHitCount(client(dataNode).prepareSearch(INDEX_NAME).setSize(0).get(), searchableDocs + 15);
     }
+
+    public void testFlushOnTooManyRemoteTranslogFiles() throws Exception {
+        internalCluster().startClusterManagerOnlyNode();
+        String datanode = internalCluster().startDataOnlyNodes(1).get(0);
+        createIndex(INDEX_NAME, remoteStoreIndexSettings(0, 10000L, -1));
+        ensureGreen(INDEX_NAME);
+
+        ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest();
+        updateSettingsRequest.persistentSettings(
+            Settings.builder().put(RemoteStoreSettings.CLUSTER_REMOTE_MAX_TRANSLOG_READERS.getKey(), "100")
+        );
+        assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet());
+
+        IndexShard indexShard = getIndexShard(datanode,
INDEX_NAME);
+        Path translogLocation = getTranslog(indexShard).location();
+        assertFalse(indexShard.shouldPeriodicallyFlush());
+
+        try (Stream<Path> files = Files.list(translogLocation)) {
+            long totalFiles = files.filter(f -> f.getFileName().toString().endsWith(Translog.TRANSLOG_FILE_SUFFIX)).count();
+            assertEquals(totalFiles, 1L);
+        }
+
+        // indexing 100 documents (100 bulk requests), no flush will be triggered yet
+        for (int i = 0; i < 100; i++) {
+            indexBulk(INDEX_NAME, 1);
+        }
+
+        try (Stream<Path> files = Files.list(translogLocation)) {
+            long totalFiles = files.filter(f -> f.getFileName().toString().endsWith(Translog.TRANSLOG_FILE_SUFFIX)).count();
+            assertEquals(totalFiles, 101L);
+        }
+        // Will flush and trim the translog readers
+        indexBulk(INDEX_NAME, 1);
+
+        assertBusy(() -> {
+            try (Stream<Path> files = Files.list(translogLocation)) {
+                long totalFiles = files.filter(f -> f.getFileName().toString().endsWith(Translog.TRANSLOG_FILE_SUFFIX)).count();
+                assertEquals(totalFiles, 1L);
+            }
+        }, 30, TimeUnit.SECONDS);
+    }
 }

diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java
index dab0f6bcf1c85..c70f22be518f2 100644
--- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java
+++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java
@@ -732,7 +732,8 @@ public void apply(Settings value, Settings current, Settings previous) {
                     RemoteStoreSettings.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING,
                     RemoteStoreSettings.CLUSTER_REMOTE_TRANSLOG_TRANSFER_TIMEOUT_SETTING,
                     RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING,
-                    RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING
+                    RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING,
+                    RemoteStoreSettings.CLUSTER_REMOTE_MAX_TRANSLOG_READERS
                 )
             )
         );

diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java
index 72a67096fcd9e..cbf10c9e4e552 100644
--- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java
+++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java
@@ -2917,7 +2917,7 @@ public void restoreFromRepository(Repository repository, ActionListener
     *
     * @return {@code true} if the engine should be flushed
     */
-    boolean shouldPeriodicallyFlush() {
+    public boolean shouldPeriodicallyFlush() {
        final Engine engine = getEngineOrNull();
        if (engine != null) {
            try {
@@ -4493,6 +4493,7 @@ public Durability getTranslogDurability() {
    /**
     * Schedules a flush or translog generation roll if needed but will not schedule more than one concurrently. The operation will be
     * executed asynchronously on the flush thread pool.
+     * Can also schedule a flush if the translog manager determines one is needed
     */
    public void afterWriteOperation() {
        if (shouldPeriodicallyFlush() || shouldRollTranslogGeneration()) {

diff --git a/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java b/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java
index a22c538286a88..e2210217672ef 100644
--- a/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java
+++ b/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java
@@ -437,6 +437,12 @@ public String getTranslogUUID() {
     * @return if the translog should be flushed
     */
    public boolean shouldPeriodicallyFlush(long localCheckpointOfLastCommit, long flushThreshold) {
+        /*
+         * This can trigger a flush depending upon the translog's implementation
+         */
+        if (translog.shouldFlush()) {
+            return true;
+        }
        // This is the minimum seqNo that is referred in translog and considered for calculating translog size
        long minTranslogRefSeqNo = translog.getMinUnreferencedSeqNoInSegments(localCheckpointOfLastCommit + 1);
        final long minReferencedTranslogGeneration = translog.getMinGenerationForSeqNo(minTranslogRefSeqNo).translogFileGeneration;

diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java
index da905b9605dfd..8c01791af2c9e 100644
--- a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java
+++ b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java
@@ -675,4 +675,15 @@ public long getMinUnreferencedSeqNoInSegments(long minUnrefCheckpointInLastCommi
    int availablePermits() {
        return syncPermit.availablePermits();
    }
+
+    /**
+     * Checks whether or not the shard should be flushed based on translog files.
+     * This checks whether the number of translog files breaches the threshold count determined by the
+     * {@code cluster.remote_store.translog.max_readers} setting
+     * @return {@code true} if the shard should be flushed
+     */
+    @Override
+    protected boolean shouldFlush() {
+        return readers.size() >= translogTransferManager.getMaxRemoteTranslogReadersSettings();
+    }
 }

diff --git a/server/src/main/java/org/opensearch/index/translog/Translog.java b/server/src/main/java/org/opensearch/index/translog/Translog.java
index c653605f8fa10..842e9c77d2350 100644
--- a/server/src/main/java/org/opensearch/index/translog/Translog.java
+++ b/server/src/main/java/org/opensearch/index/translog/Translog.java
@@ -2082,4 +2082,13 @@ public static String createEmptyTranslog(
    public long getMinUnreferencedSeqNoInSegments(long minUnrefCheckpointInLastCommit) {
        return minUnrefCheckpointInLastCommit;
    }
+
+    /**
+     * Checks whether or not the shard should be flushed based on translog files.
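+     * (The base implementation below always returns {@code false}; remote-backed translogs such as
+     * RemoteFsTranslog override it, as in the preceding RemoteFsTranslog hunk.)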
+     * Each translog type can have its own decider.
+     * @return {@code true} if the shard should be flushed
+     */
+    protected boolean shouldFlush() {
+        return false;
+    }
 }

diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java
index 1087244623b87..47638f44fd6fc 100644
--- a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java
+++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java
@@ -585,4 +585,8 @@ public void onFailure(Exception e) {
            throw e;
        }
    }
+
+    public int getMaxRemoteTranslogReadersSettings() {
+        return this.remoteStoreSettings.getMaxRemoteTranslogReaders();
+    }
 }

diff --git a/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java b/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java
index e0a9f7a9e05c1..0bd4c7aedfc03 100644
--- a/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java
+++ b/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java
@@ -94,11 +94,23 @@ public class RemoteStoreSettings {
        Property.Dynamic
    );
+
+    /**
+     * Controls the maximum number of referenced remote translog files. If breached, the shard will be flushed.
+     */
+    public static final Setting<Integer> CLUSTER_REMOTE_MAX_TRANSLOG_READERS = Setting.intSetting(
+        "cluster.remote_store.translog.max_readers",
+        1000,
+        100,
+        Property.Dynamic,
+        Property.NodeScope
+    );
+
    private volatile TimeValue clusterRemoteTranslogBufferInterval;
    private volatile int minRemoteSegmentMetadataFiles;
    private volatile TimeValue clusterRemoteTranslogTransferTimeout;
    private volatile RemoteStoreEnums.PathType pathType;
    private volatile RemoteStoreEnums.PathHashAlgorithm pathHashAlgorithm;
+    private volatile int maxRemoteTranslogReaders;
    public RemoteStoreSettings(Settings settings, ClusterSettings clusterSettings) {
        clusterRemoteTranslogBufferInterval = CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.get(settings);
@@ -124,6 +136,9 @@ public RemoteStoreSettings(Settings settings, ClusterSettings clusterSettings) {
        pathHashAlgorithm = clusterSettings.get(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING);
        clusterSettings.addSettingsUpdateConsumer(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING, this::setPathHashAlgorithm);
+
+        maxRemoteTranslogReaders = CLUSTER_REMOTE_MAX_TRANSLOG_READERS.get(settings);
+        clusterSettings.addSettingsUpdateConsumer(CLUSTER_REMOTE_MAX_TRANSLOG_READERS, this::setMaxRemoteTranslogReaders);
    }
    public TimeValue getClusterRemoteTranslogBufferInterval() {
@@ -167,4 +182,12 @@ private void setPathType(RemoteStoreEnums.PathType pathType) {
    private void setPathHashAlgorithm(RemoteStoreEnums.PathHashAlgorithm pathHashAlgorithm) {
        this.pathHashAlgorithm = pathHashAlgorithm;
    }
+
+    public int getMaxRemoteTranslogReaders() {
+        return maxRemoteTranslogReaders;
+    }
+
+    private void setMaxRemoteTranslogReaders(int maxRemoteTranslogReaders) {
+        this.maxRemoteTranslogReaders = maxRemoteTranslogReaders;
+    }
 }

diff --git a/server/src/test/java/org/opensearch/indices/RemoteStoreSettingsDynamicUpdateTests.java b/server/src/test/java/org/opensearch/indices/RemoteStoreSettingsDynamicUpdateTests.java
index 8a77d97f88d67..f89fd3df6e340 100644
--- a/server/src/test/java/org/opensearch/indices/RemoteStoreSettingsDynamicUpdateTests.java
+++ b/server/src/test/java/org/opensearch/indices/RemoteStoreSettingsDynamicUpdateTests.java
@@ -96,4 +96,24 @@ public void
testClusterRemoteTranslogTransferTimeout() { ); assertEquals(TimeValue.timeValueSeconds(40), remoteStoreSettings.getClusterRemoteTranslogTransferTimeout()); } + + public void testMaxRemoteReferencedTranslogFiles() { + // Test default value + assertEquals(1000, remoteStoreSettings.getMaxRemoteTranslogReaders()); + + // Test override with valid value + clusterSettings.applySettings( + Settings.builder().put(RemoteStoreSettings.CLUSTER_REMOTE_MAX_TRANSLOG_READERS.getKey(), "500").build() + ); + assertEquals(500, remoteStoreSettings.getMaxRemoteTranslogReaders()); + + // Test override with value less than minimum + assertThrows( + IllegalArgumentException.class, + () -> clusterSettings.applySettings( + Settings.builder().put(RemoteStoreSettings.CLUSTER_REMOTE_MAX_TRANSLOG_READERS.getKey(), "99").build() + ) + ); + assertEquals(500, remoteStoreSettings.getMaxRemoteTranslogReaders()); + } } From a8008e2f37ec7dca061fcb7b5f84c944dd4f1108 Mon Sep 17 00:00:00 2001 From: Bhumika Saini Date: Thu, 25 Apr 2024 12:27:29 +0530 Subject: [PATCH 35/37] Integrate local recovery with remote store seeding during migration (#12922) Signed-off-by: Bhumika Saini --- .../RemotePrimaryLocalRecoveryIT.java | 179 ++++++++++++++++++ .../org/opensearch/index/IndexService.java | 3 +- .../index/seqno/ReplicationTracker.java | 11 +- .../opensearch/index/shard/IndexShard.java | 5 +- .../opensearch/index/shard/StoreRecovery.java | 8 + .../index/translog/RemoteFsTranslog.java | 19 +- .../index/translog/TranslogConfig.java | 21 +- .../translog/TruncateTranslogAction.java | 3 +- .../index/engine/InternalEngineTests.java | 8 +- .../index/shard/RefreshListenersTests.java | 3 +- .../InternalTranslogManagerTests.java | 14 +- .../index/translog/LocalTranslogTests.java | 8 +- .../index/translog/RemoteFsTranslogTests.java | 20 +- .../translog/TranslogManagerTestCase.java | 9 +- .../index/engine/EngineTestCase.java | 21 +- 15 files changed, 285 insertions(+), 47 deletions(-) create mode 100644 server/src/internalClusterTest/java/org/opensearch/remotemigration/RemotePrimaryLocalRecoveryIT.java diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemotePrimaryLocalRecoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemotePrimaryLocalRecoveryIT.java new file mode 100644 index 0000000000000..024fc68602a19 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemotePrimaryLocalRecoveryIT.java @@ -0,0 +1,179 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.remotemigration;
+
+import org.opensearch.action.admin.indices.stats.ShardStats;
+import org.opensearch.cluster.metadata.IndexMetadata;
+import org.opensearch.cluster.node.DiscoveryNode;
+import org.opensearch.cluster.node.DiscoveryNodes;
+import org.opensearch.cluster.routing.ShardRouting;
+import org.opensearch.common.blobstore.BlobPath;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.unit.TimeValue;
+import org.opensearch.core.util.FileSystemUtils;
+import org.opensearch.index.remote.RemoteSegmentStats;
+import org.opensearch.index.translog.RemoteTranslogStats;
+import org.opensearch.test.InternalTestCluster;
+import org.opensearch.test.OpenSearchIntegTestCase;
+
+import java.nio.file.Path;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
+
+import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.SEGMENTS;
+import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA;
+import static org.opensearch.index.store.RemoteSegmentStoreDirectory.SEGMENT_NAME_UUID_SEPARATOR;
+
+@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0)
+public class RemotePrimaryLocalRecoveryIT extends MigrationBaseTestCase {
+    String indexName = "idx1";
+    int numOfNodes = randomIntBetween(6, 9);
+
+    /**
+     * Tests local recovery sanity in the happy path flow
+     */
+    public void testLocalRecoveryRollingRestart() throws Exception {
+        triggerRollingRestartForRemoteMigration(0);
+        internalCluster().stopAllNodes();
+    }
+
+    /**
+     * Tests local recovery sanity during remote migration with a node restart in between
+     */
+    public void testLocalRecoveryRollingRestartAndNodeFailure() throws Exception {
+        triggerRollingRestartForRemoteMigration(0);
+
+        DiscoveryNodes discoveryNodes = internalCluster().client().admin().cluster().prepareState().get().getState().getNodes();
+        DiscoveryNode nodeToRestart = (DiscoveryNode) discoveryNodes.getDataNodes().values().toArray()[randomIntBetween(0, numOfNodes - 4)];
+        internalCluster().restartNode(nodeToRestart.getName());
+
+        Map<ShardRouting, ShardStats> shardStatsMap = internalCluster().client().admin().indices().prepareStats(indexName).get().asMap();
+        for (Map.Entry<ShardRouting, ShardStats> entry : shardStatsMap.entrySet()) {
+            ShardRouting shardRouting = entry.getKey();
+            ShardStats shardStats = entry.getValue();
+            if (nodeToRestart.getId().equals(shardRouting.currentNodeId())) {
+                RemoteSegmentStats remoteSegmentStats = shardStats.getStats().getSegments().getRemoteSegmentStats();
+                assertTrue(remoteSegmentStats.getTotalUploadTime() > 0);
+                assertTrue(remoteSegmentStats.getUploadBytesSucceeded() > 0);
+            }
+
+            assertBusy(() -> {
+                String shardPath = getShardLevelBlobPath(
+                    client(),
+                    indexName,
+                    new BlobPath(),
+                    String.valueOf(shardRouting.getId()),
+                    SEGMENTS,
+                    DATA
+                ).buildAsString();
+                Path segmentDataRepoPath = segmentRepoPath.resolve(shardPath);
+                List<String> segmentsNFilesInRepo = Arrays.stream(FileSystemUtils.files(segmentDataRepoPath))
+                    .filter(path -> path.getFileName().toString().contains("segments_"))
+                    .map(path -> path.getFileName().toString())
+                    .collect(Collectors.toList());
+                Set<String> expectedUniqueSegmentsNFiles = segmentsNFilesInRepo.stream()
+                    .map(fileName -> fileName.split(SEGMENT_NAME_UUID_SEPARATOR)[0])
+                    .collect(Collectors.toSet());
+                assertEquals(
+                    "Expected no duplicate segments_N files in remote but duplicates were found " + segmentsNFilesInRepo,
+
expectedUniqueSegmentsNFiles.size(),
+                    segmentsNFilesInRepo.size()
+                );
+            }, 90, TimeUnit.SECONDS);
+        }
+
+        internalCluster().stopAllNodes();
+    }
+
+    /**
+     * Tests local recovery flow sanity in the happy path flow with replicas in place
+     */
+    public void testLocalRecoveryFlowWithReplicas() throws Exception {
+        triggerRollingRestartForRemoteMigration(randomIntBetween(1, 2));
+        internalCluster().stopAllNodes();
+    }
+
+    /**
+     * Helper method to run a rolling restart for migration to remote backed cluster
+     */
+    private void triggerRollingRestartForRemoteMigration(int replicaCount) throws Exception {
+        internalCluster().startClusterManagerOnlyNodes(3);
+        internalCluster().startNodes(numOfNodes - 3);
+
+        // create index
+        Settings indexSettings = Settings.builder()
+            .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, replicaCount)
+            .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 10))
+            .build();
+        createIndex(indexName, indexSettings);
+        ensureGreen(indexName);
+        indexBulk(indexName, randomIntBetween(100, 10000));
+        refresh(indexName);
+        indexBulk(indexName, randomIntBetween(100, 10000));
+
+        initDocRepToRemoteMigration();
+
+        // rolling restart
+        final Settings remoteNodeAttributes = remoteStoreClusterSettings(
+            REPOSITORY_NAME,
+            segmentRepoPath,
+            REPOSITORY_2_NAME,
+            translogRepoPath
+        );
+        internalCluster().rollingRestart(new InternalTestCluster.RestartCallback() {
+            // Update remote attributes
+            @Override
+            public Settings onNodeStopped(String nodeName) {
+                return remoteNodeAttributes;
+            }
+        });
+        ensureStableCluster(numOfNodes);
+        ensureGreen(TimeValue.timeValueSeconds(90), indexName);
+        assertEquals(internalCluster().size(), numOfNodes);
+
+        // Assert on remote uploads
+        Map<ShardRouting, ShardStats> shardStatsMap = internalCluster().client().admin().indices().prepareStats(indexName).get().asMap();
+        DiscoveryNodes discoveryNodes = internalCluster().client().admin().cluster().prepareState().get().getState().getNodes();
+        shardStatsMap.forEach((shardRouting, shardStats) -> {
+            if (discoveryNodes.get(shardRouting.currentNodeId()).isRemoteStoreNode() && shardRouting.primary()) {
+                RemoteSegmentStats remoteSegmentStats = shardStats.getStats().getSegments().getRemoteSegmentStats();
+                assertTrue(remoteSegmentStats.getTotalUploadTime() > 0);
+                assertTrue(remoteSegmentStats.getUploadBytesSucceeded() > 0);
+            }
+        });
+
+        // Assert on new remote uploads after seeding
+        indexBulk(indexName, randomIntBetween(100, 10000));
+        refresh(indexName);
+        indexBulk(indexName, randomIntBetween(100, 10000));
+        Map<ShardRouting, ShardStats> newShardStatsMap = internalCluster().client().admin().indices().prepareStats(indexName).get().asMap();
+        newShardStatsMap.forEach((shardRouting, shardStats) -> {
+            if (discoveryNodes.get(shardRouting.currentNodeId()).isRemoteStoreNode() && shardRouting.primary()) {
+                RemoteSegmentStats prevRemoteSegmentStats = shardStatsMap.get(shardRouting)
+                    .getStats()
+                    .getSegments()
+                    .getRemoteSegmentStats();
+                RemoteSegmentStats newRemoteSegmentStats = shardStats.getStats().getSegments().getRemoteSegmentStats();
+                assertTrue(newRemoteSegmentStats.getTotalUploadTime() > prevRemoteSegmentStats.getTotalUploadTime());
+                assertTrue(newRemoteSegmentStats.getUploadBytesSucceeded() > prevRemoteSegmentStats.getUploadBytesSucceeded());
+
+                RemoteTranslogStats prevRemoteTranslogStats = shardStatsMap.get(shardRouting)
+                    .getStats()
+                    .getTranslog()
+                    .getRemoteTranslogStats();
+                RemoteTranslogStats newRemoteTranslogStats = shardStats.getStats().getTranslog().getRemoteTranslogStats();
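+                // after seeding, translog upload stats must have advanced past the pre-restart snapshot, proving fresh remote uploads
+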
assertTrue(newRemoteTranslogStats.getUploadBytesSucceeded() > prevRemoteTranslogStats.getUploadBytesSucceeded()); + } + }); + } +} diff --git a/server/src/main/java/org/opensearch/index/IndexService.java b/server/src/main/java/org/opensearch/index/IndexService.java index a7b29314210df..14c6cecb1f847 100644 --- a/server/src/main/java/org/opensearch/index/IndexService.java +++ b/server/src/main/java/org/opensearch/index/IndexService.java @@ -500,13 +500,14 @@ public synchronized IndexShard createShard( if (this.indexSettings.isRemoteStoreEnabled()) { remoteDirectory = remoteDirectoryFactory.newDirectory(this.indexSettings, path); } else { - if (sourceNode != null && sourceNode.isRemoteStoreNode() == false) { + if (sourceNode == null || sourceNode.isRemoteStoreNode() == false) { if (routing.primary() == false) { throw new IllegalStateException("Can't migrate a remote shard to replica before primary " + routing.shardId()); } logger.info("DocRep shard {} is migrating to remote", shardId); seedRemote = true; } + remoteDirectory = ((RemoteSegmentStoreDirectoryFactory) remoteDirectoryFactory).newDirectory( RemoteStoreNodeAttribute.getRemoteStoreSegmentRepo(this.indexSettings.getNodeSettings()), this.indexSettings.getUUID(), diff --git a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java index 39b1cc130d6a5..6697991aef90b 100644 --- a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java @@ -1106,12 +1106,11 @@ private ReplicationGroup calculateReplicationGroup() { } else { newVersion = replicationGroup.getVersion() + 1; } - assert indexSettings.isRemoteTranslogStoreEnabled() - // Handle migration cases. Ignore assertion if any of the shard copies in the replication group is assigned to a remote node - || (replicationGroup != null - && replicationGroup.getReplicationTargets() - .stream() - .anyMatch(shardRouting -> isShardOnRemoteEnabledNode.apply(shardRouting.currentNodeId()))) + assert newVersion == 0 || indexSettings.isRemoteTranslogStoreEnabled() + // Handle migration cases. 
Ignore assertion if any of the shard copies in the replication group is assigned to a remote node + || replicationGroup.getReplicationTargets() + .stream() + .anyMatch(shardRouting -> isShardOnRemoteEnabledNode.apply(shardRouting.currentNodeId())) || checkpoints.entrySet().stream().filter(e -> e.getValue().tracked).allMatch(e -> e.getValue().replicated) : "In absence of remote translog store, all tracked shards must have replication mode as LOGICAL_REPLICATION"; diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index cbf10c9e4e552..18d4a2ca6d639 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -436,7 +436,7 @@ public IndexShard( logger.debug("state: [CREATED]"); this.checkIndexOnStartup = indexSettings.getValue(IndexSettings.INDEX_CHECK_ON_STARTUP); - this.translogConfig = new TranslogConfig(shardId, shardPath().resolveTranslog(), indexSettings, bigArrays, nodeId); + this.translogConfig = new TranslogConfig(shardId, shardPath().resolveTranslog(), indexSettings, bigArrays, nodeId, seedRemote); final String aId = shardRouting.allocationId().getId(); final long primaryTerm = indexSettings.getIndexMetadata().primaryTerm(shardId.id()); this.pendingPrimaryTerm = primaryTerm; @@ -5000,7 +5000,8 @@ public void syncTranslogFilesFromRemoteTranslog() throws IOException { shardPath().resolveTranslog(), indexSettings.getRemoteStorePathStrategy(), remoteStoreSettings, - logger + logger, + shouldSeedRemoteStore() ); } diff --git a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java index e130f50e105d5..8d689e8769728 100644 --- a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java @@ -650,6 +650,14 @@ private void internalRecoverFromStore(IndexShard indexShard) throws IndexShardRe indexShard.recoveryState().getIndex().setFileDetailsComplete(); } indexShard.openEngineAndRecoverFromTranslog(); + if (indexShard.shouldSeedRemoteStore()) { + indexShard.getThreadPool().executor(ThreadPool.Names.GENERIC).execute(() -> { + logger.info("Attempting to seed Remote Store via local recovery for {}", indexShard.shardId()); + indexShard.refresh("remote store migration"); + }); + indexShard.waitForRemoteStoreSync(); + logger.info("Remote Store is now seeded via local recovery for {}", indexShard.shardId()); + } indexShard.getEngine().fillSeqNoGaps(indexShard.getPendingPrimaryTerm()); indexShard.finalizeRecovery(); indexShard.postRecovery("post recovery from shard_store"); diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java index 8c01791af2c9e..c7f756957076c 100644 --- a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java +++ b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java @@ -120,7 +120,7 @@ public RemoteFsTranslog( remoteStoreSettings ); try { - download(translogTransferManager, location, logger); + download(translogTransferManager, location, logger, config.shouldSeedRemote()); Checkpoint checkpoint = readCheckpoint(location); logger.info("Downloaded data from remote translog till maxSeqNo = {}", checkpoint.maxSeqNo); this.readers.addAll(recoverFromFiles(checkpoint)); @@ -168,7 +168,8 @@ public static void 
download( Path location, RemoteStorePathStrategy pathStrategy, RemoteStoreSettings remoteStoreSettings, - Logger logger + Logger logger, + boolean seedRemote ) throws IOException { assert repository instanceof BlobStoreRepository : String.format( Locale.ROOT, @@ -189,11 +190,12 @@ public static void download( pathStrategy, remoteStoreSettings ); - RemoteFsTranslog.download(translogTransferManager, location, logger); + RemoteFsTranslog.download(translogTransferManager, location, logger, seedRemote); logger.trace(remoteTranslogTransferTracker.toString()); } - static void download(TranslogTransferManager translogTransferManager, Path location, Logger logger) throws IOException { + static void download(TranslogTransferManager translogTransferManager, Path location, Logger logger, boolean seedRemote) + throws IOException { /* In Primary to Primary relocation , there can be concurrent upload and download of translog. While translog files are getting downloaded by new primary, it might hence be deleted by the primary @@ -206,7 +208,7 @@ static void download(TranslogTransferManager translogTransferManager, Path locat boolean success = false; long startTimeMs = System.currentTimeMillis(); try { - downloadOnce(translogTransferManager, location, logger); + downloadOnce(translogTransferManager, location, logger, seedRemote); success = true; return; } catch (FileNotFoundException | NoSuchFileException e) { @@ -220,7 +222,8 @@ static void download(TranslogTransferManager translogTransferManager, Path locat throw ex; } - private static void downloadOnce(TranslogTransferManager translogTransferManager, Path location, Logger logger) throws IOException { + private static void downloadOnce(TranslogTransferManager translogTransferManager, Path location, Logger logger, boolean seedRemote) + throws IOException { logger.debug("Downloading translog files from remote"); RemoteTranslogTransferTracker statsTracker = translogTransferManager.getRemoteTranslogTransferTracker(); long prevDownloadBytesSucceeded = statsTracker.getDownloadBytesSucceeded(); @@ -262,7 +265,9 @@ private static void downloadOnce(TranslogTransferManager translogTransferManager logger.debug("No translog files found on remote, checking local filesystem for cleanup"); if (FileSystemUtils.exists(location.resolve(CHECKPOINT_FILE_NAME))) { final Checkpoint checkpoint = readCheckpoint(location); - if (isEmptyTranslog(checkpoint) == false) { + if (seedRemote) { + logger.debug("Remote migration ongoing. 
Retaining the translog on local, skipping clean-up"); + } else if (isEmptyTranslog(checkpoint) == false) { logger.debug("Translog files exist on local without any metadata in remote, cleaning up these files"); // Creating empty translog will cleanup the older un-referenced tranlog files, we don't have to explicitly delete Translog.createEmptyTranslog(location, translogTransferManager.getShardId(), checkpoint); diff --git a/server/src/main/java/org/opensearch/index/translog/TranslogConfig.java b/server/src/main/java/org/opensearch/index/translog/TranslogConfig.java index 2f00773075d41..f720f041b287c 100644 --- a/server/src/main/java/org/opensearch/index/translog/TranslogConfig.java +++ b/server/src/main/java/org/opensearch/index/translog/TranslogConfig.java @@ -59,6 +59,7 @@ public final class TranslogConfig { private final Path translogPath; private final ByteSizeValue bufferSize; private final String nodeId; + private final boolean seedRemote; /** * Creates a new TranslogConfig instance @@ -66,9 +67,17 @@ public final class TranslogConfig { * @param translogPath the path to use for the transaction log files * @param indexSettings the index settings used to set internal variables * @param bigArrays a bigArrays instance used for temporarily allocating write operations + * @param seedRemote boolean denoting whether remote store needs to be seeded as part of remote migration */ - public TranslogConfig(ShardId shardId, Path translogPath, IndexSettings indexSettings, BigArrays bigArrays, String nodeId) { - this(shardId, translogPath, indexSettings, bigArrays, DEFAULT_BUFFER_SIZE, nodeId); + public TranslogConfig( + ShardId shardId, + Path translogPath, + IndexSettings indexSettings, + BigArrays bigArrays, + String nodeId, + boolean seedRemote + ) { + this(shardId, translogPath, indexSettings, bigArrays, DEFAULT_BUFFER_SIZE, nodeId, seedRemote); } TranslogConfig( @@ -77,7 +86,8 @@ public TranslogConfig(ShardId shardId, Path translogPath, IndexSettings indexSet IndexSettings indexSettings, BigArrays bigArrays, ByteSizeValue bufferSize, - String nodeId + String nodeId, + boolean seedRemote ) { this.bufferSize = bufferSize; this.indexSettings = indexSettings; @@ -85,6 +95,7 @@ public TranslogConfig(ShardId shardId, Path translogPath, IndexSettings indexSet this.translogPath = translogPath; this.bigArrays = bigArrays; this.nodeId = nodeId; + this.seedRemote = seedRemote; } /** @@ -125,4 +136,8 @@ public ByteSizeValue getBufferSize() { public String getNodeId() { return nodeId; } + + public boolean shouldSeedRemote() { + return seedRemote; + } } diff --git a/server/src/main/java/org/opensearch/index/translog/TruncateTranslogAction.java b/server/src/main/java/org/opensearch/index/translog/TruncateTranslogAction.java index 25fcdc614172a..cf6c9087777fe 100644 --- a/server/src/main/java/org/opensearch/index/translog/TruncateTranslogAction.java +++ b/server/src/main/java/org/opensearch/index/translog/TruncateTranslogAction.java @@ -195,7 +195,8 @@ private boolean isTranslogClean(ShardPath shardPath, ClusterState clusterState, translogPath, indexSettings, BigArrays.NON_RECYCLING_INSTANCE, - "" + "", + false ); long primaryTerm = indexSettings.getIndexMetadata().primaryTerm(shardPath.getShardId().id()); // We open translog to check for corruption, do not clean anything. 
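For orientation, the TranslogConfig change above threads a single boolean from shard construction down to the translog download path. A minimal sketch (not part of any patch in this series; shardId, shardPath, indexSettings and nodeId stand in for values the caller already holds):

    // Sketch only: the seedRemote flag is carried end to end by TranslogConfig.
    TranslogConfig config = new TranslogConfig(
        shardId,
        shardPath.resolveTranslog(),      // local translog directory
        indexSettings,
        BigArrays.NON_RECYCLING_INSTANCE,
        nodeId,
        true                              // seedRemote: this shard still has to seed the remote store
    );
    // RemoteFsTranslog.downloadOnce() later consults the same flag: when no metadata exists in
    // remote and shouldSeedRemote() is true, the local translog is retained instead of cleaned up.
    if (config.shouldSeedRemote()) {
        // keep local translog files; they are the source for remote seeding during migration
    }
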
diff --git a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java index cc927a19fd01a..74aef54987842 100644 --- a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java @@ -4002,7 +4002,7 @@ public void testRecoverFromForeignTranslog() throws IOException { final Path badTranslogLog = createTempDir(); final String badUUID = Translog.createEmptyTranslog(badTranslogLog, SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get()); Translog translog = new LocalTranslog( - new TranslogConfig(shardId, badTranslogLog, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, ""), + new TranslogConfig(shardId, badTranslogLog, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, "", false), badUUID, createTranslogDeletionPolicy(INDEX_SETTINGS), () -> SequenceNumbers.NO_OPS_PERFORMED, @@ -4020,7 +4020,8 @@ public void testRecoverFromForeignTranslog() throws IOException { translog.location(), config.getIndexSettings(), BigArrays.NON_RECYCLING_INSTANCE, - "" + "", + false ); EngineConfig brokenConfig = new EngineConfig.Builder().shardId(shardId) @@ -7714,7 +7715,8 @@ public void testNotWarmUpSearcherInEngineCtor() throws Exception { createTempDir(), config.getTranslogConfig().getIndexSettings(), config.getTranslogConfig().getBigArrays(), - "" + "", + false ); EngineConfig configWithWarmer = new EngineConfig.Builder().shardId(config.getShardId()) .threadPool(config.getThreadPool()) diff --git a/server/src/test/java/org/opensearch/index/shard/RefreshListenersTests.java b/server/src/test/java/org/opensearch/index/shard/RefreshListenersTests.java index a45b25f04060b..8a77fbca2915d 100644 --- a/server/src/test/java/org/opensearch/index/shard/RefreshListenersTests.java +++ b/server/src/test/java/org/opensearch/index/shard/RefreshListenersTests.java @@ -134,7 +134,8 @@ public void setupListeners() throws Exception { createTempDir("translog"), indexSettings, BigArrays.NON_RECYCLING_INSTANCE, - "" + "", + false ); Engine.EventListener eventListener = new Engine.EventListener() { @Override diff --git a/server/src/test/java/org/opensearch/index/translog/InternalTranslogManagerTests.java b/server/src/test/java/org/opensearch/index/translog/InternalTranslogManagerTests.java index c098d11a3487f..c27d0367abf68 100644 --- a/server/src/test/java/org/opensearch/index/translog/InternalTranslogManagerTests.java +++ b/server/src/test/java/org/opensearch/index/translog/InternalTranslogManagerTests.java @@ -38,7 +38,7 @@ public void testRecoveryFromTranslog() throws IOException { LocalCheckpointTracker tracker = new LocalCheckpointTracker(NO_OPS_PERFORMED, NO_OPS_PERFORMED); try { translogManager = new InternalTranslogManager( - new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, ""), + new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, "", false), primaryTerm, globalCheckpoint::get, createTranslogDeletionPolicy(INDEX_SETTINGS), @@ -68,7 +68,7 @@ public void testRecoveryFromTranslog() throws IOException { translogManager.syncTranslog(); translogManager.close(); translogManager = new InternalTranslogManager( - new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, ""), + new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, "", false), primaryTerm, 
globalCheckpoint::get, createTranslogDeletionPolicy(INDEX_SETTINGS), @@ -117,7 +117,7 @@ public void testTranslogRollsGeneration() throws IOException { LocalCheckpointTracker tracker = new LocalCheckpointTracker(NO_OPS_PERFORMED, NO_OPS_PERFORMED); try { translogManager = new InternalTranslogManager( - new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, ""), + new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, "", false), primaryTerm, globalCheckpoint::get, createTranslogDeletionPolicy(INDEX_SETTINGS), @@ -147,7 +147,7 @@ public void testTranslogRollsGeneration() throws IOException { translogManager.syncTranslog(); translogManager.close(); translogManager = new InternalTranslogManager( - new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, ""), + new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, "", false), primaryTerm, globalCheckpoint::get, createTranslogDeletionPolicy(INDEX_SETTINGS), @@ -182,7 +182,7 @@ public void testTrimOperationsFromTranslog() throws IOException { LocalCheckpointTracker tracker = new LocalCheckpointTracker(NO_OPS_PERFORMED, NO_OPS_PERFORMED); try { translogManager = new InternalTranslogManager( - new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, ""), + new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, "", false), primaryTerm, globalCheckpoint::get, createTranslogDeletionPolicy(INDEX_SETTINGS), @@ -214,7 +214,7 @@ public void testTrimOperationsFromTranslog() throws IOException { translogManager.close(); translogManager = new InternalTranslogManager( - new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, ""), + new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, "", false), primaryTerm, globalCheckpoint::get, createTranslogDeletionPolicy(INDEX_SETTINGS), @@ -253,7 +253,7 @@ public void testTranslogSync() throws IOException { ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), B_1, null); AtomicReference translogManagerAtomicReference = new AtomicReference<>(); translogManager = new InternalTranslogManager( - new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, ""), + new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, "", false), primaryTerm, globalCheckpoint::get, createTranslogDeletionPolicy(INDEX_SETTINGS), diff --git a/server/src/test/java/org/opensearch/index/translog/LocalTranslogTests.java b/server/src/test/java/org/opensearch/index/translog/LocalTranslogTests.java index 4997067b75198..cae27d5b259c4 100644 --- a/server/src/test/java/org/opensearch/index/translog/LocalTranslogTests.java +++ b/server/src/test/java/org/opensearch/index/translog/LocalTranslogTests.java @@ -291,7 +291,7 @@ private TranslogConfig getTranslogConfig(final Path path, final Settings setting ); final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(shardId.getIndex(), settings); - return new TranslogConfig(shardId, path, indexSettings, NON_RECYCLING_INSTANCE, bufferSize, ""); + return new TranslogConfig(shardId, path, indexSettings, NON_RECYCLING_INSTANCE, bufferSize, "", false); } private Location addToTranslogAndList(Translog translog, List list, Translog.Operation op) throws 
IOException { @@ -1453,7 +1453,8 @@ public void testTranslogWriterCanFlushInAddOrReadCall() throws IOException { temp.getIndexSettings(), temp.getBigArrays(), new ByteSizeValue(1, ByteSizeUnit.KB), - "" + "", + false ); final Set persistedSeqNos = new HashSet<>(); @@ -1552,7 +1553,8 @@ public void testTranslogWriterFsyncedWithLocalTranslog() throws IOException { temp.getIndexSettings(), temp.getBigArrays(), new ByteSizeValue(1, ByteSizeUnit.KB), - "" + "", + false ); final Set persistedSeqNos = new HashSet<>(); diff --git a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java index 28979a3dc4f28..6bf35cc1eac9b 100644 --- a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java +++ b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java @@ -224,7 +224,7 @@ private TranslogConfig getTranslogConfig(final Path path, final Settings setting // To simulate that the node is remote backed Settings nodeSettings = Settings.builder().put("node.attr.remote_store.translog.repository", "my-repo-1").build(); final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(shardId.getIndex(), settings, nodeSettings); - return new TranslogConfig(shardId, path, indexSettings, NON_RECYCLING_INSTANCE, bufferSize, ""); + return new TranslogConfig(shardId, path, indexSettings, NON_RECYCLING_INSTANCE, bufferSize, "", false); } private BlobStoreRepository createRepository() { @@ -399,7 +399,8 @@ private TranslogConfig getConfig(int gensToKeep) { temp.getIndexSettings(), temp.getBigArrays(), new ByteSizeValue(1, ByteSizeUnit.KB), - "" + "", + false ); return config; } @@ -1561,7 +1562,8 @@ public void testTranslogWriterFsyncDisabledInRemoteFsTranslog() throws IOExcepti temp.getIndexSettings(), temp.getBigArrays(), new ByteSizeValue(1, ByteSizeUnit.KB), - "" + "", + false ); final Set persistedSeqNos = new HashSet<>(); @@ -1692,7 +1694,7 @@ public void testDownloadWithRetries() throws IOException { // Always File not found when(mockTransfer.downloadTranslog(any(), any(), any())).thenThrow(new NoSuchFileException("File not found")); TranslogTransferManager finalMockTransfer = mockTransfer; - assertThrows(NoSuchFileException.class, () -> RemoteFsTranslog.download(finalMockTransfer, location, logger)); + assertThrows(NoSuchFileException.class, () -> RemoteFsTranslog.download(finalMockTransfer, location, logger, false)); // File not found in first attempt . File found in second attempt. 
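// The retry loop exists because, as the RemoteFsTranslog comment above notes, the old primary can delete translog files while the new primary is downloading them during primary-to-primary relocation; a FileNotFoundException/NoSuchFileException on the first attempt is therefore expected to succeed on a later one.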
mockTransfer = mock(TranslogTransferManager.class); @@ -1713,7 +1715,7 @@ public void testDownloadWithRetries() throws IOException { }).when(mockTransfer).downloadTranslog(any(), any(), any()); // no exception thrown - RemoteFsTranslog.download(mockTransfer, location, logger); + RemoteFsTranslog.download(mockTransfer, location, logger, false); } // No translog data in local as well as remote, we skip creating empty translog @@ -1726,7 +1728,7 @@ public void testDownloadWithNoTranslogInLocalAndRemote() throws IOException { when(mockTransfer.getRemoteTranslogTransferTracker()).thenReturn(remoteTranslogTransferTracker); Path[] filesBeforeDownload = FileSystemUtils.files(location); - RemoteFsTranslog.download(mockTransfer, location, logger); + RemoteFsTranslog.download(mockTransfer, location, logger, false); assertEquals(filesBeforeDownload, FileSystemUtils.files(location)); } @@ -1746,7 +1748,7 @@ public void testDownloadWithTranslogOnlyInLocal() throws IOException { Checkpoint existingCheckpoint = Translog.readCheckpoint(location); TranslogTransferManager finalMockTransfer = mockTransfer; - RemoteFsTranslog.download(finalMockTransfer, location, logger); + RemoteFsTranslog.download(finalMockTransfer, location, logger, false); Path[] filesPostDownload = FileSystemUtils.files(location); assertEquals(2, filesPostDownload.length); @@ -1782,11 +1784,11 @@ public void testDownloadWithEmptyTranslogOnlyInLocal() throws IOException { TranslogTransferManager finalMockTransfer = mockTransfer; // download first time will ensure creating empty translog - RemoteFsTranslog.download(finalMockTransfer, location, logger); + RemoteFsTranslog.download(finalMockTransfer, location, logger, false); Path[] filesPostFirstDownload = FileSystemUtils.files(location); // download on empty translog should be a no-op - RemoteFsTranslog.download(finalMockTransfer, location, logger); + RemoteFsTranslog.download(finalMockTransfer, location, logger, false); Path[] filesPostSecondDownload = FileSystemUtils.files(location); assertArrayEquals(filesPostFirstDownload, filesPostSecondDownload); diff --git a/server/src/test/java/org/opensearch/index/translog/TranslogManagerTestCase.java b/server/src/test/java/org/opensearch/index/translog/TranslogManagerTestCase.java index e17d2770f014a..a16e607bbf37a 100644 --- a/server/src/test/java/org/opensearch/index/translog/TranslogManagerTestCase.java +++ b/server/src/test/java/org/opensearch/index/translog/TranslogManagerTestCase.java @@ -74,7 +74,14 @@ protected Translog createTranslog(LongSupplier primaryTermSupplier) throws IOExc } protected Translog createTranslog(Path translogPath, LongSupplier primaryTermSupplier) throws IOException { - TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, ""); + TranslogConfig translogConfig = new TranslogConfig( + shardId, + translogPath, + INDEX_SETTINGS, + BigArrays.NON_RECYCLING_INSTANCE, + "", + false + ); String translogUUID = Translog.createEmptyTranslog( translogPath, SequenceNumbers.NO_OPS_PERFORMED, diff --git a/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java index 1cb5501810c5d..3403425d89254 100644 --- a/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java @@ -527,7 +527,14 @@ protected Translog createTranslog(LongSupplier primaryTermSupplier) throws IOExc } 
protected Translog createTranslog(Path translogPath, LongSupplier primaryTermSupplier) throws IOException { - TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, ""); + TranslogConfig translogConfig = new TranslogConfig( + shardId, + translogPath, + INDEX_SETTINGS, + BigArrays.NON_RECYCLING_INSTANCE, + "", + false + ); String translogUUID = Translog.createEmptyTranslog( translogPath, SequenceNumbers.NO_OPS_PERFORMED, @@ -877,7 +884,8 @@ public EngineConfig config( translogPath, indexSettings, BigArrays.NON_RECYCLING_INSTANCE, - "" + "", + false ); final List extRefreshListenerList = externalRefreshListener == null ? emptyList() @@ -946,7 +954,14 @@ protected EngineConfig config( .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) .build() ); - TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, indexSettings, BigArrays.NON_RECYCLING_INSTANCE, ""); + TranslogConfig translogConfig = new TranslogConfig( + shardId, + translogPath, + indexSettings, + BigArrays.NON_RECYCLING_INSTANCE, + "", + false + ); return new EngineConfig.Builder().shardId(config.getShardId()) .threadPool(config.getThreadPool()) .indexSettings(indexSettings) From 42d46a9d048b8f8ac07d88b71ede32f6c0d7a261 Mon Sep 17 00:00:00 2001 From: Ashish Singh Date: Thu, 25 Apr 2024 14:07:19 +0530 Subject: [PATCH 36/37] Allow index & cluster default refresh interval setting value to be -1 (#11411) Signed-off-by: Ashish Singh --- .../ClusterIndexRefreshIntervalIT.java | 169 +++++++++++++++--- .../metadata/MetadataCreateIndexService.java | 7 +- .../opensearch/indices/IndicesService.java | 7 +- 3 files changed, 159 insertions(+), 24 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/ClusterIndexRefreshIntervalIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/ClusterIndexRefreshIntervalIT.java index 25fa7ae7eb8eb..f936b53f52a7b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/ClusterIndexRefreshIntervalIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/ClusterIndexRefreshIntervalIT.java @@ -235,33 +235,19 @@ public void testDefaultRefreshIntervalWithUpdateClusterAndIndexSettings() throws } public void testRefreshIntervalDisabled() throws ExecutionException, InterruptedException { - TimeValue clusterMinimumRefreshInterval = client().settings() - .getAsTime(IndicesService.CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING.getKey(), TimeValue.MINUS_ONE); - boolean createIndexSuccess = clusterMinimumRefreshInterval.equals(TimeValue.MINUS_ONE); String clusterManagerName = internalCluster().getClusterManagerName(); List dataNodes = new ArrayList<>(internalCluster().getDataNodeNames()); Settings settings = Settings.builder() .put(indexSettings()) .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), IndexSettings.MINIMUM_REFRESH_INTERVAL) .build(); - if (createIndexSuccess) { - createIndex(INDEX_NAME, settings); - ensureYellowAndNoInitializingShards(INDEX_NAME); - ensureGreen(INDEX_NAME); - GetIndexResponse getIndexResponse = client(clusterManagerName).admin().indices().getIndex(new GetIndexRequest()).get(); - IndicesService indicesService = internalCluster().getInstance(IndicesService.class, randomFrom(dataNodes)); - String uuid = getIndexResponse.getSettings().get(INDEX_NAME).get(IndexMetadata.SETTING_INDEX_UUID); - IndexService indexService = indicesService.indexService(new Index(INDEX_NAME, uuid)); - 
assertEquals(IndexSettings.MINIMUM_REFRESH_INTERVAL, indexService.getRefreshTaskInterval()); } else { IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> createIndex(INDEX_NAME, settings)); assertEquals( "invalid index.refresh_interval [-1]: cannot be smaller than cluster.minimum.index.refresh_interval [" + getMinRefreshIntervalForRefreshDisabled() + "]", exception.getMessage() ); } + createIndex(INDEX_NAME, settings); + ensureGreen(INDEX_NAME); + GetIndexResponse getIndexResponse = client(clusterManagerName).admin().indices().getIndex(new GetIndexRequest()).get(); + IndicesService indicesService = internalCluster().getInstance(IndicesService.class, randomFrom(dataNodes)); + String uuid = getIndexResponse.getSettings().get(INDEX_NAME).get(IndexMetadata.SETTING_INDEX_UUID); + IndexService indexService = indicesService.indexService(new Index(INDEX_NAME, uuid)); + assertEquals(IndexSettings.MINIMUM_REFRESH_INTERVAL, indexService.getRefreshTaskInterval()); } protected TimeValue getMinRefreshIntervalForRefreshDisabled() { @@ -366,6 +352,147 @@ public void testClusterMinimumChangeOnIndexWithCustomRefreshInterval() throws Ex assertEquals(customRefreshInterval, indexService.getRefreshTaskInterval()); } + public void testClusterMinimumRefreshIntervalOfMinusOneFails() { + // This test checks that we cannot set the cluster minimum refresh interval to -1 (or -1ms). + String clusterManagerName = internalCluster().getClusterManagerName(); + String refreshInterval = randomFrom("-1", "-1ms"); + IllegalArgumentException ex = assertThrows( + IllegalArgumentException.class, + () -> client(clusterManagerName).admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING.getKey(), refreshInterval)) + .get() + ); + assertEquals( + "failed to parse value [" + refreshInterval + "] for setting [cluster.minimum.index.refresh_interval], must be >= [0ms]", + ex.getMessage() + ); + } + + public void testClusterMinimumRefreshIntervalOfZero() { + // This test checks that we can set the cluster minimum refresh interval to 0. + String clusterManagerName = internalCluster().getClusterManagerName(); + client(clusterManagerName).admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING.getKey(), "0")) + .get(); + } + + public void testDefaultRefreshIntervalOfMinusOneIrrespectiveOfMinimum() { + // This test checks that we are able to set the cluster default refresh interval to -1 regardless of what the + // minimum is set to. -1 corresponds to no periodic background refreshes. + String clusterManagerName = internalCluster().getClusterManagerName(); + client(clusterManagerName).admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder() + .put(CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING.getKey(), randomFrom("0", "1ms", "1s", "10s")) + .put(CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING.getKey(), randomFrom("-1", "-1ms")) + ) + .get(); + } + + public void testCreateIndexWithMinusOneRefreshInterval() throws ExecutionException, InterruptedException { + // This test checks that we are able to create index with -1 refresh interval using index settings and default interval both.
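+ // Note: an explicit -1 passes validation even when the cluster minimum below is 10s, because validateRefreshIntervalSettings(...) in MetadataCreateIndexService (see its hunk later in this patch) returns early for a -1 request value.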
+ String clusterManagerName = internalCluster().getClusterManagerName(); + client(clusterManagerName).admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder() + .put(CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING.getKey(), "10s") + .put(CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING.getKey(), "10s") + ) + .get(); + + Settings indexSettings = Settings.builder() + .put(indexSettings()) + .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), randomFrom("-1", "-1ms")) + .build(); + createIndex(INDEX_NAME, indexSettings); + ensureGreen(INDEX_NAME); + + IndexService indexService = getIndexServiceFromRandomDataNode(INDEX_NAME); + assertEquals(-1, indexService.getRefreshTaskInterval().millis()); + + client(clusterManagerName).admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING.getKey(), randomFrom("-1", "-1ms"))) + .get(); + createIndex(OTHER_INDEX_NAME); + ensureGreen(OTHER_INDEX_NAME); + indexService = getIndexServiceFromRandomDataNode(OTHER_INDEX_NAME); + assertEquals(-1, indexService.getRefreshTaskInterval().millis()); + } + + public void testUpdateIndexWithMinusOneRefreshInterval() throws ExecutionException, InterruptedException { + // This test checks that we are able to update index with -1 refresh interval using index settings and default interval both. + String clusterManagerName = internalCluster().getClusterManagerName(); + client(clusterManagerName).admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder() + .put(CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING.getKey(), "10s") + .put(CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING.getKey(), "10s") + ) + .get(); + + createIndex(INDEX_NAME); + ensureGreen(INDEX_NAME); + IndexService indexService = getIndexServiceFromRandomDataNode(INDEX_NAME); + assertEquals(10, indexService.getRefreshTaskInterval().seconds()); + + client(clusterManagerName).admin() + .indices() + .updateSettings( + new UpdateSettingsRequest(INDEX_NAME).settings( + Settings.builder().put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), randomFrom("-1", "-1ms")) + ) + ) + .actionGet(); + assertEquals(-1, indexService.getRefreshTaskInterval().millis()); + + client(clusterManagerName).admin() + .indices() + .updateSettings( + new UpdateSettingsRequest(INDEX_NAME).settings( + Settings.builder().put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "100s") + ) + ) + .actionGet(); + assertEquals(100, indexService.getRefreshTaskInterval().seconds()); + + client(clusterManagerName).admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING.getKey(), randomFrom("-1", "-1ms"))) + .get(); + + client(clusterManagerName).admin() + .indices() + .updateSettings( + new UpdateSettingsRequest(INDEX_NAME).settings( + Settings.builder().putNull(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey()) + ) + ) + .actionGet(); + assertEquals(-1, indexService.getRefreshTaskInterval().millis()); + } + + private IndexService getIndexServiceFromRandomDataNode(String indexName) throws ExecutionException, InterruptedException { + String clusterManagerName = internalCluster().getClusterManagerName(); + List dataNodes = new ArrayList<>(internalCluster().getDataNodeNames()); + GetIndexResponse getIndexResponse = client(clusterManagerName).admin().indices().getIndex(new GetIndexRequest()).get(); + IndicesService indicesService = 
internalCluster().getInstance(IndicesService.class, randomFrom(dataNodes)); + String uuid = getIndexResponse.getSettings().get(indexName).get(IndexMetadata.SETTING_INDEX_UUID); + return indicesService.indexService(new Index(indexName, uuid)); + } + protected TimeValue getDefaultRefreshInterval() { return IndexSettings.DEFAULT_REFRESH_INTERVAL; } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index 0eba4d241f0fd..b31985a260361 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -1636,10 +1636,15 @@ public static void validateTranslogRetentionSettings(Settings indexSettings) { * @param clusterSettings cluster setting */ public static void validateRefreshIntervalSettings(Settings requestSettings, ClusterSettings clusterSettings) { - if (IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.exists(requestSettings) == false) { + if (IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.exists(requestSettings) == false + || requestSettings.get(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey()) == null) { return; } TimeValue requestRefreshInterval = IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.get(requestSettings); + // If the refresh interval supplied is -1, we allow the index to be created because -1 means no periodic refresh. + if (requestRefreshInterval.millis() == -1) { + return; + } TimeValue clusterMinimumRefreshInterval = clusterSettings.get(IndicesService.CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING); if (requestRefreshInterval.millis() < clusterMinimumRefreshInterval.millis()) { throw new IllegalArgumentException( diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index 0187a9fb3b8ba..fd7d897a0e99c 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -273,8 +273,8 @@ public class IndicesService extends AbstractLifecycleComponent */ public static final Setting CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING = Setting.timeSetting( "cluster.minimum.index.refresh_interval", - IndexSettings.MINIMUM_REFRESH_INTERVAL, - IndexSettings.MINIMUM_REFRESH_INTERVAL, + TimeValue.ZERO, + TimeValue.ZERO, new ClusterMinimumRefreshIntervalValidator(), Property.NodeScope, Property.Dynamic @@ -2009,6 +2009,9 @@ public Iterator> settings() { * @param defaultRefreshInterval value of cluster default index refresh interval setting */ private static void validateRefreshIntervalSettings(TimeValue minimumRefreshInterval, TimeValue defaultRefreshInterval) { + if (defaultRefreshInterval.millis() < 0) { + return; + } if (minimumRefreshInterval.compareTo(defaultRefreshInterval) > 0) { throw new IllegalArgumentException( "cluster minimum index refresh interval [" From 5bf522cce3dded6ecf6167ce53f1f56e3771edd5 Mon Sep 17 00:00:00 2001 From: Lakshya Taragi <157457166+ltaragi@users.noreply.github.com> Date: Thu, 25 Apr 2024 16:21:16 +0530 Subject: [PATCH 37/37] Take shallow copy snapshot when remote store migration is in progress (#13309) Signed-off-by: Lakshya Taragi --- .../RemoteStoreMigrationSettingsUpdateIT.java | 81 ++-------------- ...eMigrationShardAllocationBaseTestCase.java | 96 +++++++++++++++++++ .../RemoteStoreMigrationTestCase.java | 51 ++++++++++ 
.../snapshots/SnapshotsService.java | 9 ++ 4 files changed, 165 insertions(+), 72 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationSettingsUpdateIT.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationSettingsUpdateIT.java index c3720e6fbbd09..b71f7d7cf7e4a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationSettingsUpdateIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationSettingsUpdateIT.java @@ -8,27 +8,15 @@ package org.opensearch.remotemigration; -import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.opensearch.client.Client; -import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsException; -import org.opensearch.core.rest.RestStatus; -import org.opensearch.index.IndexSettings; -import org.opensearch.indices.replication.common.ReplicationType; -import org.opensearch.snapshots.SnapshotInfo; -import org.opensearch.snapshots.SnapshotState; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; import java.nio.file.Path; import java.util.Optional; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; -import static org.opensearch.index.IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING; import static org.opensearch.node.remotestore.RemoteStoreNodeService.CompatibilityMode.MIXED; import static org.opensearch.node.remotestore.RemoteStoreNodeService.CompatibilityMode.STRICT; import static org.opensearch.node.remotestore.RemoteStoreNodeService.Direction.REMOTE_STORE; @@ -92,13 +80,7 @@ public void testNewRestoredIndexIsRemoteStoreBackedForRemoteStoreDirectionAndMix assertNodeInCluster(remoteNodeName); logger.info("Create a non remote-backed index"); - client.admin() - .indices() - .prepareCreate(TEST_INDEX) - .setSettings( - Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build() - ) - .get(); + createIndex(TEST_INDEX, 0); logger.info("Verify that non remote stored backed index is created"); assertNonRemoteStoreBackedIndex(TEST_INDEX); @@ -115,21 +97,12 @@ public void testNewRestoredIndexIsRemoteStoreBackedForRemoteStoreDirectionAndMix logger.info("Create snapshot of non remote stored backed index"); - SnapshotInfo snapshotInfo = client().admin() - .cluster() - .prepareCreateSnapshot(snapshotRepoName, snapshotName) - .setIndices(TEST_INDEX) - .setWaitForCompletion(true) - .get() - .getSnapshotInfo(); - - assertEquals(SnapshotState.SUCCESS, snapshotInfo.state()); - assertTrue(snapshotInfo.successfulShards() > 0); - assertEquals(0, snapshotInfo.failedShards()); + createSnapshot(snapshotRepoName, snapshotName, TEST_INDEX); logger.info("Restore index from snapshot under NONE direction"); String restoredIndexName1 = TEST_INDEX + "-restored1"; restoreSnapshot(snapshotRepoName, snapshotName, restoredIndexName1); + ensureGreen(restoredIndexName1); logger.info("Verify that restored index is non remote-backed"); 
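// With the migration direction set to NONE, the restore keeps the index on document replication and attaches no remote store settings.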
assertNonRemoteStoreBackedIndex(restoredIndexName1); @@ -138,6 +111,7 @@ public void testNewRestoredIndexIsRemoteStoreBackedForRemoteStoreDirectionAndMix setDirection(REMOTE_STORE.direction); String restoredIndexName2 = TEST_INDEX + "-restored2"; restoreSnapshot(snapshotRepoName, snapshotName, restoredIndexName2); + ensureGreen(restoredIndexName2); logger.info("Verify that restored index is remote-backed"); assertRemoteStoreBackedIndex(restoredIndexName2); @@ -146,10 +120,10 @@ // compatibility mode setting test public void testSwitchToStrictMode() throws Exception { - logger.info(" --> initialize cluster"); + logger.info("Initialize cluster"); initializeCluster(false); - logger.info(" --> create a mixed mode cluster"); + logger.info("Create a mixed mode cluster"); setClusterMode(MIXED.mode); addRemote = true; String remoteNodeName = internalCluster().startNode(); @@ -159,58 +133,21 @@ public void testSwitchToStrictMode() throws Exception { assertNodeInCluster(remoteNodeName); assertNodeInCluster(nonRemoteNodeName); - logger.info(" --> attempt switching to strict mode"); + logger.info("Attempt switching to strict mode"); SettingsException exception = assertThrows(SettingsException.class, () -> setClusterMode(STRICT.mode)); assertEquals( "can not switch to STRICT compatibility mode when the cluster contains both remote and non-remote nodes", exception.getMessage() ); - logger.info(" --> stop remote node so that cluster had only non-remote nodes"); + logger.info("Stop remote node so that cluster has only non-remote nodes"); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(remoteNodeName)); ensureStableCluster(2); - logger.info(" --> attempt switching to strict mode"); + logger.info("Attempt switching to strict mode"); setClusterMode(STRICT.mode); } - // restore indices from a snapshot - private void restoreSnapshot(String snapshotRepoName, String snapshotName, String restoredIndexName) { - RestoreSnapshotResponse restoreSnapshotResponse = client.admin() - .cluster() - .prepareRestoreSnapshot(snapshotRepoName, snapshotName) - .setWaitForCompletion(false) - .setIndices(TEST_INDEX) - .setRenamePattern(TEST_INDEX) - .setRenameReplacement(restoredIndexName) - .get(); - - assertEquals(restoreSnapshotResponse.status(), RestStatus.ACCEPTED); - ensureGreen(restoredIndexName); - } - - // verify that the created index is not remote store backed - private void assertNonRemoteStoreBackedIndex(String indexName) { - Settings indexSettings = client.admin().indices().prepareGetIndex().execute().actionGet().getSettings().get(indexName); - assertEquals(ReplicationType.DOCUMENT.toString(), indexSettings.get(SETTING_REPLICATION_TYPE)); - assertNull(indexSettings.get(SETTING_REMOTE_STORE_ENABLED)); - assertNull(indexSettings.get(SETTING_REMOTE_SEGMENT_STORE_REPOSITORY)); - assertNull(indexSettings.get(SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY)); - } - - // verify that the created index is remote store backed - private void assertRemoteStoreBackedIndex(String indexName) { - Settings indexSettings = client.admin().indices().prepareGetIndex().execute().actionGet().getSettings().get(indexName); - assertEquals(ReplicationType.SEGMENT.toString(), indexSettings.get(SETTING_REPLICATION_TYPE)); - assertEquals("true", indexSettings.get(SETTING_REMOTE_STORE_ENABLED)); - assertEquals(REPOSITORY_NAME, indexSettings.get(SETTING_REMOTE_SEGMENT_STORE_REPOSITORY)); - assertEquals(REPOSITORY_2_NAME, 
indexSettings.get(SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY)); - assertEquals( - IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, - INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.get(indexSettings) - ); - } - // bootstrap a cluster private void initializeCluster(boolean remoteClusterManager) { addRemote = remoteClusterManager; diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationShardAllocationBaseTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationShardAllocationBaseTestCase.java index ad2302d1ab2e1..ffcab9483485d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationShardAllocationBaseTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationShardAllocationBaseTestCase.java @@ -9,15 +9,27 @@ package org.opensearch.remotemigration; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.settings.Settings; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.index.IndexSettings; +import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.snapshots.SnapshotInfo; +import org.opensearch.snapshots.SnapshotState; import java.util.Map; import java.util.Optional; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; +import static org.opensearch.index.IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING; import static org.opensearch.node.remotestore.RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING; import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @@ -98,4 +110,88 @@ protected ShardRouting getShardRouting(boolean isPrimary) { return (isPrimary ? table.primaryShard() : table.replicaShards().get(0)); } + // create a snapshot + public static SnapshotInfo createSnapshot(String snapshotRepoName, String snapshotName, String... 
indices) { + SnapshotInfo snapshotInfo = internalCluster().client() + .admin() + .cluster() + .prepareCreateSnapshot(snapshotRepoName, snapshotName) + .setIndices(indices) + .setWaitForCompletion(true) + .get() + .getSnapshotInfo(); + + assertEquals(SnapshotState.SUCCESS, snapshotInfo.state()); + assertTrue(snapshotInfo.successfulShards() > 0); + assertEquals(0, snapshotInfo.failedShards()); + return snapshotInfo; + } + + // create new index + public static void createIndex(String indexName, int replicaCount) { + assertAcked( + internalCluster().client() + .admin() + .indices() + .prepareCreate(indexName) + .setSettings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, replicaCount) + .build() + ) + .get() + ); + } + + // restore indices from a snapshot + public static RestoreSnapshotResponse restoreSnapshot(String snapshotRepoName, String snapshotName, String restoredIndexName) { + RestoreSnapshotResponse restoreSnapshotResponse = internalCluster().client() + .admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepoName, snapshotName) + .setWaitForCompletion(false) + .setIndices(TEST_INDEX) + .setRenamePattern(TEST_INDEX) + .setRenameReplacement(restoredIndexName) + .get(); + assertEquals(restoreSnapshotResponse.status(), RestStatus.ACCEPTED); + return restoreSnapshotResponse; + } + + // verify that the created index is not remote store backed + public static void assertNonRemoteStoreBackedIndex(String indexName) { + Settings indexSettings = internalCluster().client() + .admin() + .indices() + .prepareGetIndex() + .execute() + .actionGet() + .getSettings() + .get(indexName); + assertEquals(ReplicationType.DOCUMENT.toString(), indexSettings.get(SETTING_REPLICATION_TYPE)); + assertNull(indexSettings.get(SETTING_REMOTE_STORE_ENABLED)); + assertNull(indexSettings.get(SETTING_REMOTE_SEGMENT_STORE_REPOSITORY)); + assertNull(indexSettings.get(SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY)); + } + + // verify that the created index is remote store backed + public static void assertRemoteStoreBackedIndex(String indexName) { + Settings indexSettings = internalCluster().client() + .admin() + .indices() + .prepareGetIndex() + .execute() + .actionGet() + .getSettings() + .get(indexName); + assertEquals(ReplicationType.SEGMENT.toString(), indexSettings.get(SETTING_REPLICATION_TYPE)); + assertEquals("true", indexSettings.get(SETTING_REMOTE_STORE_ENABLED)); + assertEquals(REPOSITORY_NAME, indexSettings.get(SETTING_REMOTE_SEGMENT_STORE_REPOSITORY)); + assertEquals(REPOSITORY_2_NAME, indexSettings.get(SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY)); + assertEquals( + IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, + INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.get(indexSettings) + ); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java index 640b83f194c1c..7d816a5e18698 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java @@ -13,8 +13,11 @@ import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.opensearch.client.Client; import org.opensearch.common.settings.Settings; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import 
org.opensearch.snapshots.SnapshotInfo; import org.opensearch.test.OpenSearchIntegTestCase; +import java.nio.file.Path; import java.util.List; import static org.opensearch.node.remotestore.RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING; @@ -70,4 +73,52 @@ public void testMigrationDirections() { updateSettingsRequest.persistentSettings(Settings.builder().put(MIGRATION_DIRECTION_SETTING.getKey(), "random")); assertThrows(IllegalArgumentException.class, () -> client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); } + + public void testNoShallowSnapshotInMixedMode() throws Exception { + logger.info("Initialize remote cluster"); + addRemote = true; + internalCluster().setBootstrapClusterManagerNodeIndex(0); + List cmNodes = internalCluster().startNodes(1); + Client client = internalCluster().client(cmNodes.get(0)); + + logger.info("Add remote node"); + internalCluster().startNode(); + internalCluster().validateClusterFormed(); + + logger.info("Create remote backed index"); + RemoteStoreMigrationShardAllocationBaseTestCase.createIndex("test", 0); + RemoteStoreMigrationShardAllocationBaseTestCase.assertRemoteStoreBackedIndex("test"); + + logger.info("Create shallow snapshot setting enabled repo"); + String shallowSnapshotRepoName = "shallow-snapshot-repo-name"; + Path shallowSnapshotRepoPath = randomRepoPath(); + assertAcked( + clusterAdmin().preparePutRepository(shallowSnapshotRepoName) + .setType("fs") + .setSettings( + Settings.builder() + .put("location", shallowSnapshotRepoPath) + .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), Boolean.TRUE) + ) + ); + + logger.info("Verify shallow snapshot creation"); + final String snapshot1 = "snapshot1"; + SnapshotInfo snapshotInfo1 = RemoteStoreMigrationShardAllocationBaseTestCase.createSnapshot( + shallowSnapshotRepoName, + snapshot1, + "test" + ); + assertEquals(snapshotInfo1.isRemoteStoreIndexShallowCopyEnabled(), true); + + logger.info("Set MIXED compatibility mode"); + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.persistentSettings(Settings.builder().put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed")); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + logger.info("Verify that new snapshot is not shallow"); + final String snapshot2 = "snapshot2"; + SnapshotInfo snapshotInfo2 = RemoteStoreMigrationShardAllocationBaseTestCase.createSnapshot(shallowSnapshotRepoName, snapshot2); + assertEquals(snapshotInfo2.isRemoteStoreIndexShallowCopyEnabled(), false); + } } diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java index 71918bc73b55a..22b640963e896 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java @@ -131,6 +131,8 @@ import static java.util.Collections.emptySet; import static java.util.Collections.unmodifiableList; import static org.opensearch.cluster.SnapshotsInProgress.completed; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.CompatibilityMode; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; import static org.opensearch.repositories.blobstore.BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY; import static org.opensearch.snapshots.SnapshotUtils.validateSnapshotsBackingAnyIndex; @@ -343,6 +345,13 @@ 
public ClusterState execute(ClusterState currentState) { } boolean remoteStoreIndexShallowCopy = REMOTE_STORE_INDEX_SHALLOW_COPY.get(repository.getMetadata().settings()); + logger.debug("remote_store_index_shallow_copy setting is set as [{}]", remoteStoreIndexShallowCopy); + if (remoteStoreIndexShallowCopy + && clusterService.getClusterSettings().get(REMOTE_STORE_COMPATIBILITY_MODE_SETTING).equals(CompatibilityMode.MIXED)) { + // don't allow shallow snapshots if compatibility mode is not strict + logger.warn("Shallow snapshots are not supported during migration. Falling back to full snapshot."); + remoteStoreIndexShallowCopy = false; + } newEntry = SnapshotsInProgress.startedEntry( new Snapshot(repositoryName, snapshotId), request.includeGlobalState(),
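The guard added above is the substance of this patch: a repository may opt in to shallow copies, but an in-flight remote store migration (MIXED compatibility mode) forces a full snapshot. Isolated from the surrounding method, the decision reduces to the following sketch; every name in it appears in the hunk above, and only the surrounding startedEntry(...) plumbing is elided.

    // Repository-level opt-in for shallow (remote-store referencing) snapshots.
    boolean remoteStoreIndexShallowCopy = REMOTE_STORE_INDEX_SHALLOW_COPY.get(repository.getMetadata().settings());
    // Shallow snapshots are not supported while a migration is in progress,
    // so MIXED compatibility mode downgrades the request to a full snapshot.
    if (remoteStoreIndexShallowCopy
        && clusterService.getClusterSettings().get(REMOTE_STORE_COMPATIBILITY_MODE_SETTING).equals(CompatibilityMode.MIXED)) {
        remoteStoreIndexShallowCopy = false;
    }

testNoShallowSnapshotInMixedMode exercises both sides of the guard: snapshot1, taken before the mode change, reports isRemoteStoreIndexShallowCopyEnabled() as true, while snapshot2, taken in mixed mode, reports false.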