diff --git a/CHANGELOG.md b/CHANGELOG.md index 291ecd71b975d..33f8ca22b60f5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -103,22 +103,9 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ## [Unreleased 2.x] ### Added -- [Tiered caching] Introducing cache plugins and exposing Ehcache as one of the pluggable disk cache option ([#11874](https://github.com/opensearch-project/OpenSearch/pull/11874)) -- Add support for dependencies in plugin descriptor properties with semver range ([#11441](https://github.com/opensearch-project/OpenSearch/pull/11441)) -- Add community_id ingest processor ([#12121](https://github.com/opensearch-project/OpenSearch/pull/12121)) -- Introduce query level setting `index.query.max_nested_depth` limiting nested queries ([#3268](https://github.com/opensearch-project/OpenSearch/issues/3268) -- Add toString methods to MultiSearchRequest, MultiGetRequest and CreateIndexRequest ([#12163](https://github.com/opensearch-project/OpenSearch/pull/12163)) -- Support for returning scores in matched queries ([#11626](https://github.com/opensearch-project/OpenSearch/pull/11626)) -- Add shard id property to SearchLookup for use in field types provided by plugins ([#1063](https://github.com/opensearch-project/OpenSearch/pull/1063)) -- Force merge API supports performing on primary shards only ([#11269](https://github.com/opensearch-project/OpenSearch/pull/11269)) -- [Tiered caching] Make IndicesRequestCache implementation configurable [EXPERIMENTAL] ([#12533](https://github.com/opensearch-project/OpenSearch/pull/12533)) -- Add kuromoji_completion analyzer and filter ([#4835](https://github.com/opensearch-project/OpenSearch/issues/4835)) -- The org.opensearch.bootstrap.Security should support codebase for JAR files with classifiers ([#12586](https://github.com/opensearch-project/OpenSearch/issues/12586)) -- [Metrics Framework] Adds support for asynchronous gauge metric type. 
([#12642](https://github.com/opensearch-project/OpenSearch/issues/12642)) -- Make search query counters dynamic to support all query types ([#12601](https://github.com/opensearch-project/OpenSearch/pull/12601)) -- [Tiered caching] Add policies controlling which values can enter pluggable caches [EXPERIMENTAL] ([#12542](https://github.com/opensearch-project/OpenSearch/pull/12542)) -- [Tiered caching] Add Stale keys Management and CacheCleaner to IndicesRequestCache ([#12625](https://github.com/opensearch-project/OpenSearch/pull/12625)) +- Convert ingest processor supports ip type ([#12818](https://github.com/opensearch-project/OpenSearch/pull/12818)) - Add a counter to node stat api to track shard going from idle to non-idle ([#12768](https://github.com/opensearch-project/OpenSearch/pull/12768)) +- Allow setting KEYSTORE_PASSWORD through env variable ([#12865](https://github.com/opensearch-project/OpenSearch/pull/12865)) - [Concurrent Segment Search] Perform buildAggregation concurrently and support Composite Aggregations ([#12697](https://github.com/opensearch-project/OpenSearch/pull/12697)) - [Tracing Framework] Adds support for inferred sampling ([#12315](https://github.com/opensearch-project/OpenSearch/issues/12315)) diff --git a/distribution/src/bin/opensearch b/distribution/src/bin/opensearch index 947d1167f79f2..8a3b0a009437f 100755 --- a/distribution/src/bin/opensearch +++ b/distribution/src/bin/opensearch @@ -36,14 +36,16 @@ fi # get keystore password before setting java options to avoid # conflicting GC configurations for the keystore tools -unset KEYSTORE_PASSWORD -KEYSTORE_PASSWORD= if [[ $CHECK_KEYSTORE = true ]] \ && bin/opensearch-keystore has-passwd --silent then - if ! read -s -r -p "OpenSearch keystore password: " KEYSTORE_PASSWORD ; then - echo "Failed to read keystore password on console" 1>&2 - exit 1 + if [[ ! -z "${KEYSTORE_PASSWORD}" ]]; then + echo "Using value of KEYSTORE_PASSWORD from the environment" + else + if ! read -s -r -p "OpenSearch keystore password: " KEYSTORE_PASSWORD ; then + echo "Failed to read keystore password on console" 1>&2 + exit 1 + fi fi fi diff --git a/distribution/src/bin/opensearch.bat b/distribution/src/bin/opensearch.bat index cce21504c55b7..b7ecab24165fa 100644 --- a/distribution/src/bin/opensearch.bat +++ b/distribution/src/bin/opensearch.bat @@ -62,14 +62,17 @@ if not exist "%SERVICE_LOG_DIR%" ( mkdir "%SERVICE_LOG_DIR%" ) -SET KEYSTORE_PASSWORD= IF "%checkpassword%"=="Y" ( CALL "%~dp0opensearch-keystore.bat" has-passwd --silent IF !ERRORLEVEL! EQU 0 ( - SET /P KEYSTORE_PASSWORD=OpenSearch keystore password: - IF !ERRORLEVEL! NEQ 0 ( - ECHO Failed to read keystore password on standard input - EXIT /B !ERRORLEVEL! + if defined KEYSTORE_PASSWORD ( + ECHO Using value of KEYSTORE_PASSWORD from the environment + ) else ( + SET /P KEYSTORE_PASSWORD=OpenSearch keystore password: + IF !ERRORLEVEL! NEQ 0 ( + ECHO Failed to read keystore password on standard input + EXIT /B !ERRORLEVEL! 
+ ) ) ) ) diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/ConvertProcessor.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/ConvertProcessor.java index 2a81fa5f4986e..c7b5a8978188f 100644 --- a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/ConvertProcessor.java +++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/ConvertProcessor.java @@ -32,6 +32,7 @@ package org.opensearch.ingest.common; +import org.opensearch.common.network.InetAddresses; import org.opensearch.ingest.AbstractProcessor; import org.opensearch.ingest.ConfigurationUtils; import org.opensearch.ingest.IngestDocument; @@ -118,6 +119,19 @@ public Object convert(Object value) { return value.toString(); } }, + IP { + @Override + public Object convert(Object value) { + // If the value is a valid ipv4/ipv6 address, we return the original value directly because IpFieldType + // can accept string value, this is simpler than we return an InetAddress object which needs to do more + // work such as serialization + if (value instanceof String && InetAddresses.isInetAddress(value.toString())) { + return value; + } else { + throw new IllegalArgumentException("[" + value + "] is not a valid ipv4/ipv6 address"); + } + } + }, AUTO { @Override public Object convert(Object value) { diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/ConvertProcessorTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/ConvertProcessorTests.java index 0ba0a39261d00..50ece9282888f 100644 --- a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/ConvertProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/ConvertProcessorTests.java @@ -550,4 +550,29 @@ public void testTargetField() throws Exception { assertThat(ingestDocument.getFieldValue(fieldName, String.class), equalTo(String.valueOf(randomInt))); assertThat(ingestDocument.getFieldValue(targetField, Integer.class), equalTo(randomInt)); } + + public void testConvertIP() throws Exception { + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); + String validIPString; + if (randomBoolean()) { + validIPString = "1.2.3.4"; + } else { + validIPString = "::1"; + } + String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, validIPString); + + Processor processor = new ConvertProcessor(randomAlphaOfLength(10), null, fieldName, fieldName, Type.IP, false); + processor.execute(ingestDocument); + assertThat(ingestDocument.getFieldValue(fieldName, String.class), equalTo(validIPString)); + + String invalidIPString = randomAlphaOfLength(10); + fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, invalidIPString); + Processor processorWithInvalidIP = new ConvertProcessor(randomAlphaOfLength(10), null, fieldName, fieldName, Type.IP, false); + try { + processorWithInvalidIP.execute(ingestDocument); + fail("processor execute should have failed"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), equalTo("[" + invalidIPString + "] is not a valid ipv4/ipv6 address")); + } + } } diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/330_convert_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/330_convert_processor.yml new file mode 100644 index 0000000000000..994ed225dd624 --- /dev/null +++ 
b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/330_convert_processor.yml @@ -0,0 +1,83 @@ +--- +teardown: + - do: + ingest.delete_pipeline: + id: "1" + ignore: 404 + +--- +"Test convert processor with ip type": + - skip: + version: " - 2.13.99" + reason: "introduced in 2.14.0" + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "convert" : { + "field" : "raw_ip", + "type": "ip" + } + } + ] + } + - match: { acknowledged: true } + + - do: + catch: /\[1.1.1.\] is not a valid ipv4\/ipv6 address/ + index: + index: test + id: 1 + pipeline: "1" + body: { + raw_ip: "1.1.1." + } + + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "convert" : { + "field" : "raw_ip", + "target_field" : "ip_field", + "type" : "ip", + "ignore_failure" : true + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + raw_ip: "1.1.1." + } + - do: + get: + index: test + id: 1 + - match: { _source: { raw_ip: "1.1.1."} } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + raw_ip: "1.1.1.1" + } + - do: + get: + index: test + id: 1 + - match: { _source: { raw_ip: "1.1.1.1", ip_field: "1.1.1.1"} } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/10_basic.yml index fa48820a71a89..07df09225c624 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/10_basic.yml @@ -113,12 +113,12 @@ setup: index.number_of_replicas: 0 index.number_of_shards: 6 +# Related issue: https://github.com/opensearch-project/OpenSearch/issues/4845 --- "Returns error if target index's metadata write is blocked": - - skip: - version: " - 2.99.99" - reason: "only available in 3.0+" + version: " - 2.7.99" + reason: "the bug was fixed in 2.8.0" # block source index's write operations - do: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml index 426729e737978..67b5be7eb0fd5 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml @@ -72,12 +72,12 @@ setup: - match: { _id: "1" } - match: { _source: { foo: "hello world" } } +# Related issue: https://github.com/opensearch-project/OpenSearch/issues/4845 --- "Returns error if target index's metadata write is blocked": - - skip: - version: " - 2.99.99" - reason: "only available in 3.0+" + version: " - 2.7.99" + reason: "the bug was fixed in 2.8.0" # block source index's write operations - do: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/10_basic.yml index 50c2819eac9d5..096a61a765288 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/10_basic.yml @@ -219,12 +219,12 @@ setup: index.number_of_replicas: 0 index.number_of_shards: 6 +# Related issue: https://github.com/opensearch-project/OpenSearch/issues/4845 --- "Returns error if target index's metadata write is blocked": - - skip: - version: " - 2.99.99" - reason: "only available in 3.0+" + version: " - 2.7.99" + reason: "the 
bug was fixed in 2.8.0" # block source index's write operations - do: diff --git a/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java b/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java index 7e0c1630a76e4..d218f0a985cf3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java @@ -84,6 +84,7 @@ import org.opensearch.index.translog.TestTranslog; import org.opensearch.index.translog.Translog; import org.opensearch.index.translog.TranslogStats; +import org.opensearch.indices.DefaultRemoteStoreSettings; import org.opensearch.indices.IndicesService; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; @@ -711,9 +712,9 @@ public static final IndexShard newIndexShard( SegmentReplicationCheckpointPublisher.EMPTY, null, null, - () -> IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, nodeId, null, + DefaultRemoteStoreSettings.INSTANCE, false ); } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java index e1997fea3433a..46e5b7aa28318 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java @@ -31,6 +31,7 @@ import org.opensearch.index.shard.IndexShardClosedException; import org.opensearch.index.translog.Translog.Durability; import org.opensearch.indices.IndicesService; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.recovery.RecoveryState; @@ -56,7 +57,7 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING; +import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.hamcrest.Matchers.comparesEqualTo; @@ -189,7 +190,7 @@ public void testStaleCommitDeletionWithInvokeFlush() throws Exception { Path indexPath = Path.of(String.valueOf(segmentRepoPath), indexUUID, "/0/segments/metadata"); IndexShard indexShard = getIndexShard(dataNode, INDEX_NAME); - int lastNMetadataFilesToKeep = indexShard.getRecoverySettings().getMinRemoteSegmentMetadataFiles(); + int lastNMetadataFilesToKeep = indexShard.getRemoteStoreSettings().getMinRemoteSegmentMetadataFiles(); // Delete is async. 
assertBusy(() -> { int actualFileCount = getFileCount(indexPath); @@ -224,7 +225,7 @@ public void testStaleCommitDeletionWithoutInvokeFlush() throws Exception { public void testStaleCommitDeletionWithMinSegmentFiles_3() throws Exception { Settings.Builder settings = Settings.builder() - .put(RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), "3"); + .put(RemoteStoreSettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), "3"); internalCluster().startNode(settings); createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000l, -1)); @@ -243,7 +244,7 @@ public void testStaleCommitDeletionWithMinSegmentFiles_3() throws Exception { public void testStaleCommitDeletionWithMinSegmentFiles_Disabled() throws Exception { Settings.Builder settings = Settings.builder() - .put(RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), "-1"); + .put(RemoteStoreSettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), "-1"); internalCluster().startNode(settings); createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000l, -1)); @@ -469,7 +470,7 @@ public void testAsyncDurabilityThrowsExceptionWhenRestrictSettingTrue() throws E private void assertClusterRemoteBufferInterval(TimeValue expectedBufferInterval, String dataNode) { IndicesService indicesService = internalCluster().getInstance(IndicesService.class, dataNode); - assertEquals(expectedBufferInterval, indicesService.getClusterRemoteTranslogBufferInterval()); + assertEquals(expectedBufferInterval, indicesService.getRemoteStoreSettings().getClusterRemoteTranslogBufferInterval()); } private void assertBufferInterval(TimeValue expectedBufferInterval, IndexShard indexShard) { diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 270de65f3e373..a6b0afec07481 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -117,6 +117,7 @@ import org.opensearch.indices.IndicesQueryCache; import org.opensearch.indices.IndicesRequestCache; import org.opensearch.indices.IndicesService; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.indices.ShardLimitValidator; import org.opensearch.indices.analysis.HunspellService; import org.opensearch.indices.breaker.BreakerSettings; @@ -297,7 +298,6 @@ public void apply(Settings value, Settings current, Settings previous) { RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_OPERATIONS_SETTING, RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_REMOTE_STORE_STREAMS_SETTING, RecoverySettings.INDICES_INTERNAL_REMOTE_UPLOAD_TIMEOUT, - RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING, ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING, ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_REPLICAS_RECOVERIES_SETTING, ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING, @@ -706,7 +706,6 @@ public void apply(Settings value, Settings current, Settings previous) { RemoteClusterStateService.METADATA_MANIFEST_UPLOAD_TIMEOUT_SETTING, RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING, RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING, - IndicesService.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING, 
IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING, IndicesService.CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING, IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING, @@ -723,7 +722,10 @@ public void apply(Settings value, Settings current, Settings previous) { // Concurrent segment search settings SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING, - SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING + SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING, + + RemoteStoreSettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING, + RemoteStoreSettings.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING ) ) ); diff --git a/server/src/main/java/org/opensearch/index/IndexModule.java b/server/src/main/java/org/opensearch/index/IndexModule.java index 6ac10a221d49e..3c4cb4fd596c1 100644 --- a/server/src/main/java/org/opensearch/index/IndexModule.java +++ b/server/src/main/java/org/opensearch/index/IndexModule.java @@ -79,6 +79,7 @@ import org.opensearch.index.store.remote.filecache.FileCache; import org.opensearch.index.translog.TranslogFactory; import org.opensearch.indices.IndicesQueryCache; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache; import org.opensearch.indices.mapper.MapperRegistry; import org.opensearch.indices.recovery.RecoverySettings; @@ -604,8 +605,8 @@ public IndexService newIndexService( IndexStorePlugin.DirectoryFactory remoteDirectoryFactory, BiFunction translogFactorySupplier, Supplier clusterDefaultRefreshIntervalSupplier, - Supplier clusterRemoteTranslogBufferIntervalSupplier, - RecoverySettings recoverySettings + RecoverySettings recoverySettings, + RemoteStoreSettings remoteStoreSettings ) throws IOException { final IndexEventListener eventListener = freeze(); Function> readerWrapperFactory = indexReaderWrapper @@ -663,8 +664,8 @@ public IndexService newIndexService( recoveryStateFactory, translogFactorySupplier, clusterDefaultRefreshIntervalSupplier, - clusterRemoteTranslogBufferIntervalSupplier, - recoverySettings + recoverySettings, + remoteStoreSettings ); success = true; return indexService; diff --git a/server/src/main/java/org/opensearch/index/IndexService.java b/server/src/main/java/org/opensearch/index/IndexService.java index 109bda65b1fd8..03cf8f9182211 100644 --- a/server/src/main/java/org/opensearch/index/IndexService.java +++ b/server/src/main/java/org/opensearch/index/IndexService.java @@ -94,6 +94,7 @@ import org.opensearch.index.store.Store; import org.opensearch.index.translog.Translog; import org.opensearch.index.translog.TranslogFactory; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.indices.cluster.IndicesClusterStateService; import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache; import org.opensearch.indices.mapper.MapperRegistry; @@ -183,8 +184,8 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust private final ValuesSourceRegistry valuesSourceRegistry; private final BiFunction translogFactorySupplier; private final Supplier clusterDefaultRefreshIntervalSupplier; - private final Supplier clusterRemoteTranslogBufferIntervalSupplier; private final RecoverySettings recoverySettings; + private final RemoteStoreSettings remoteStoreSettings; public IndexService( IndexSettings indexSettings, @@ -219,8 +220,8 @@ public IndexService( IndexStorePlugin.RecoveryStateFactory recoveryStateFactory, BiFunction translogFactorySupplier, 
Supplier clusterDefaultRefreshIntervalSupplier, - Supplier clusterRemoteTranslogBufferIntervalSupplier, - RecoverySettings recoverySettings + RecoverySettings recoverySettings, + RemoteStoreSettings remoteStoreSettings ) { super(indexSettings); this.allowExpensiveQueries = allowExpensiveQueries; @@ -296,8 +297,8 @@ public IndexService( this.globalCheckpointTask = new AsyncGlobalCheckpointTask(this); this.retentionLeaseSyncTask = new AsyncRetentionLeaseSyncTask(this); this.translogFactorySupplier = translogFactorySupplier; - this.clusterRemoteTranslogBufferIntervalSupplier = clusterRemoteTranslogBufferIntervalSupplier; this.recoverySettings = recoverySettings; + this.remoteStoreSettings = remoteStoreSettings; updateFsyncTaskIfNecessary(); } @@ -549,9 +550,9 @@ public synchronized IndexShard createShard( this.indexSettings.isSegRepEnabledOrRemoteNode() ? checkpointPublisher : null, remoteStore, remoteStoreStatsTrackerFactory, - clusterRemoteTranslogBufferIntervalSupplier, nodeEnv.nodeId(), recoverySettings, + remoteStoreSettings, seedRemote ); eventListener.indexShardStateChanged(indexShard, null, indexShard.state(), "shard created"); diff --git a/server/src/main/java/org/opensearch/index/mapper/DerivedFieldSupportedTypes.java b/server/src/main/java/org/opensearch/index/mapper/DerivedFieldSupportedTypes.java new file mode 100644 index 0000000000000..10b5c4a0f7157 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/mapper/DerivedFieldSupportedTypes.java @@ -0,0 +1,156 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.mapper; + +import org.apache.lucene.document.DoubleField; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.InetAddressPoint; +import org.apache.lucene.document.KeywordField; +import org.apache.lucene.document.LatLonPoint; +import org.apache.lucene.document.LongField; +import org.apache.lucene.document.LongPoint; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.IndexableField; +import org.opensearch.Version; +import org.opensearch.common.Booleans; +import org.opensearch.common.lucene.Lucene; +import org.opensearch.common.network.InetAddresses; + +import java.net.InetAddress; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.function.BiFunction; +import java.util.function.Function; +import java.util.stream.Collectors; + +/** + * Contains logic to get the FieldMapper for a given type of derived field. Also, for a given type of derived field, + * it is used to create an IndexableField for the provided type and object. It is useful when indexing into + * lucene MemoryIndex in {@link org.opensearch.index.query.DerivedFieldQuery}. + */ +enum DerivedFieldSupportedTypes { + + BOOLEAN("boolean", (name, context) -> { + BooleanFieldMapper.Builder builder = new BooleanFieldMapper.Builder(name); + return builder.build(context); + }, name -> o -> { + // Trying to mimic the logic for parsing source value as used in BooleanFieldMapper valueFetcher + Boolean value; + if (o instanceof Boolean) { + value = (Boolean) o; + } else { + String textValue = o.toString(); + value = Booleans.parseBooleanStrict(textValue, false); + } + return new Field(name, value ? 
"T" : "F", BooleanFieldMapper.Defaults.FIELD_TYPE); + }), + DATE("date", (name, context) -> { + // TODO: should we support mapping settings exposed by a given field type from derived fields too? + // for example, support `format` for date type? + DateFieldMapper.Builder builder = new DateFieldMapper.Builder( + name, + DateFieldMapper.Resolution.MILLISECONDS, + DateFieldMapper.getDefaultDateTimeFormatter(), + false, + Version.CURRENT + ); + return builder.build(context); + }, name -> o -> new LongPoint(name, (long) o)), + GEO_POINT("geo_point", (name, context) -> { + GeoPointFieldMapper.Builder builder = new GeoPointFieldMapper.Builder(name); + return builder.build(context); + }, name -> o -> { + // convert o to array of double + if (!(o instanceof List) || ((List) o).size() != 2 || !(((List) o).get(0) instanceof Double)) { + throw new ClassCastException("geo_point should be in format emit(double lat, double lon) for derived fields"); + } + return new LatLonPoint(name, (Double) ((List) o).get(0), (Double) ((List) o).get(1)); + }), + IP("ip", (name, context) -> { + IpFieldMapper.Builder builder = new IpFieldMapper.Builder(name, false, Version.CURRENT); + return builder.build(context); + }, name -> o -> { + InetAddress address; + if (o instanceof InetAddress) { + address = (InetAddress) o; + } else { + address = InetAddresses.forString(o.toString()); + } + return new InetAddressPoint(name, address); + }), + KEYWORD("keyword", (name, context) -> { + FieldType dummyFieldType = new FieldType(); + dummyFieldType.setIndexOptions(IndexOptions.DOCS_AND_FREQS); + KeywordFieldMapper.Builder keywordBuilder = new KeywordFieldMapper.Builder(name); + KeywordFieldMapper.KeywordFieldType keywordFieldType = keywordBuilder.buildFieldType(context, dummyFieldType); + keywordFieldType.setIndexAnalyzer(Lucene.KEYWORD_ANALYZER); + return new KeywordFieldMapper( + name, + dummyFieldType, + keywordFieldType, + keywordBuilder.multiFieldsBuilder.build(keywordBuilder, context), + keywordBuilder.copyTo.build(), + keywordBuilder + ); + }, name -> o -> new KeywordField(name, (String) o, Field.Store.NO)), + LONG("long", (name, context) -> { + NumberFieldMapper.Builder longBuilder = new NumberFieldMapper.Builder(name, NumberFieldMapper.NumberType.LONG, false, false); + return longBuilder.build(context); + }, name -> o -> new LongField(name, Long.parseLong(o.toString()), Field.Store.NO)), + DOUBLE("double", (name, context) -> { + NumberFieldMapper.Builder doubleBuilder = new NumberFieldMapper.Builder(name, NumberFieldMapper.NumberType.DOUBLE, false, false); + return doubleBuilder.build(context); + }, name -> o -> new DoubleField(name, Double.parseDouble(o.toString()), Field.Store.NO)); + + final String name; + private final BiFunction builder; + + private final Function> indexableFieldBuilder; + + DerivedFieldSupportedTypes( + String name, + BiFunction builder, + Function> indexableFieldBuilder + ) { + this.name = name; + this.builder = builder; + this.indexableFieldBuilder = indexableFieldBuilder; + } + + public String getName() { + return name; + } + + private FieldMapper getFieldMapper(String name, Mapper.BuilderContext context) { + return builder.apply(name, context); + } + + private Function getIndexableFieldGenerator(String name) { + return indexableFieldBuilder.apply(name); + } + + private static final Map enumMap = Arrays.stream(DerivedFieldSupportedTypes.values()) + .collect(Collectors.toMap(DerivedFieldSupportedTypes::getName, enumValue -> enumValue)); + + public static FieldMapper getFieldMapperFromType(String 
type, String name, Mapper.BuilderContext context) { + if (!enumMap.containsKey(type)) { + throw new IllegalArgumentException("Type [" + type + "] isn't supported in Derived field context."); + } + return enumMap.get(type).getFieldMapper(name, context); + } + + public static Function getIndexableFieldGeneratorType(String type, String name) { + if (!enumMap.containsKey(type)) { + throw new IllegalArgumentException("Type [" + type + "] isn't supported in Derived field context."); + } + return enumMap.get(type).getIndexableFieldGenerator(name); + } +} diff --git a/server/src/main/java/org/opensearch/index/mapper/DerivedFieldType.java b/server/src/main/java/org/opensearch/index/mapper/DerivedFieldType.java new file mode 100644 index 0000000000000..abdca7879cc94 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/mapper/DerivedFieldType.java @@ -0,0 +1,363 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.mapper; + +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.queries.spans.SpanMultiTermQueryWrapper; +import org.apache.lucene.queries.spans.SpanQuery; +import org.apache.lucene.search.MultiTermQuery; +import org.apache.lucene.search.Query; +import org.opensearch.common.Nullable; +import org.opensearch.common.geo.ShapeRelation; +import org.opensearch.common.time.DateMathParser; +import org.opensearch.common.unit.Fuzziness; +import org.opensearch.index.query.DerivedFieldQuery; +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.script.DerivedFieldScript; +import org.opensearch.script.Script; +import org.opensearch.search.lookup.SearchLookup; + +import java.io.IOException; +import java.time.ZoneId; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.function.Function; + +/** + * MappedFieldType for Derived Fields + * Contains logic to execute different type of queries on a derived field of given type. 
+ * @opensearch.internal + */ +public final class DerivedFieldType extends MappedFieldType { + private final String type; + + private final Script script; + + FieldMapper typeFieldMapper; + + final Function indexableFieldGenerator; + + public DerivedFieldType( + String name, + String type, + Script script, + boolean isIndexed, + boolean isStored, + boolean hasDocValues, + Map meta, + FieldMapper typeFieldMapper, + Function fieldFunction + ) { + super(name, isIndexed, isStored, hasDocValues, typeFieldMapper.fieldType().getTextSearchInfo(), meta); + this.type = type; + this.script = script; + this.typeFieldMapper = typeFieldMapper; + this.indexableFieldGenerator = fieldFunction; + } + + public DerivedFieldType( + String name, + String type, + Script script, + FieldMapper typeFieldMapper, + Function fieldFunction + ) { + this(name, type, script, false, false, false, Collections.emptyMap(), typeFieldMapper, fieldFunction); + } + + @Override + public String typeName() { + return "derived"; + } + + public String getType() { + return type; + } + + @Override + public DerivedFieldValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) { + if (format != null) { + throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] doesn't support formats."); + } + return new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + } + + @Override + public Query termQuery(Object value, QueryShardContext context) { + Query query = typeFieldMapper.mappedFieldType.termQuery(value, context); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public Query termQueryCaseInsensitive(Object value, @Nullable QueryShardContext context) { + Query query = typeFieldMapper.mappedFieldType.termQueryCaseInsensitive(value, context); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public Query termsQuery(List values, @Nullable QueryShardContext context) { + Query query = typeFieldMapper.mappedFieldType.termsQuery(values, context); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public Query rangeQuery( + Object lowerTerm, + Object upperTerm, + boolean includeLower, + boolean includeUpper, + ShapeRelation relation, + ZoneId timeZone, + DateMathParser parser, + QueryShardContext context + ) { + Query query = typeFieldMapper.mappedFieldType.rangeQuery( + lowerTerm, + upperTerm, + includeLower, + includeUpper, + relation, + timeZone, + parser, + context + ); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public Query fuzzyQuery( + Object value, + Fuzziness fuzziness, + int prefixLength, + int maxExpansions, + boolean 
transpositions, + QueryShardContext context + ) { + Query query = typeFieldMapper.mappedFieldType.fuzzyQuery(value, fuzziness, prefixLength, maxExpansions, transpositions, context); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public Query fuzzyQuery( + Object value, + Fuzziness fuzziness, + int prefixLength, + int maxExpansions, + boolean transpositions, + @Nullable MultiTermQuery.RewriteMethod method, + QueryShardContext context + ) { + Query query = typeFieldMapper.mappedFieldType.fuzzyQuery( + value, + fuzziness, + prefixLength, + maxExpansions, + transpositions, + method, + context + ); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public Query prefixQuery( + String value, + @Nullable MultiTermQuery.RewriteMethod method, + boolean caseInsensitive, + QueryShardContext context + ) { + Query query = typeFieldMapper.mappedFieldType.prefixQuery(value, method, caseInsensitive, context); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public Query wildcardQuery( + String value, + @Nullable MultiTermQuery.RewriteMethod method, + boolean caseInsensitive, + QueryShardContext context + ) { + Query query = typeFieldMapper.mappedFieldType.wildcardQuery(value, method, caseInsensitive, context); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public Query normalizedWildcardQuery(String value, @Nullable MultiTermQuery.RewriteMethod method, QueryShardContext context) { + Query query = typeFieldMapper.mappedFieldType.normalizedWildcardQuery(value, method, context); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public Query regexpQuery( + String value, + int syntaxFlags, + int matchFlags, + int maxDeterminizedStates, + @Nullable MultiTermQuery.RewriteMethod method, + QueryShardContext context + ) { + Query query = typeFieldMapper.mappedFieldType.regexpQuery(value, syntaxFlags, matchFlags, maxDeterminizedStates, method, context); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public Query phraseQuery(TokenStream stream, int slop, boolean enablePositionIncrements, QueryShardContext context) throws IOException { + Query query = typeFieldMapper.mappedFieldType.phraseQuery(stream, slop, 
enablePositionIncrements, context); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public Query multiPhraseQuery(TokenStream stream, int slop, boolean enablePositionIncrements, QueryShardContext context) + throws IOException { + Query query = typeFieldMapper.mappedFieldType.multiPhraseQuery(stream, slop, enablePositionIncrements, context); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public Query phrasePrefixQuery(TokenStream stream, int slop, int maxExpansions, QueryShardContext context) throws IOException { + Query query = typeFieldMapper.mappedFieldType.phrasePrefixQuery(stream, slop, maxExpansions, context); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public SpanQuery spanPrefixQuery(String value, SpanMultiTermQueryWrapper.SpanRewriteMethod method, QueryShardContext context) { + throw new IllegalArgumentException( + "Can only use span prefix queries on text fields - not on [" + name() + "] which is of type [" + typeName() + "]" + ); + } + + @Override + public Query distanceFeatureQuery(Object origin, String pivot, float boost, QueryShardContext context) { + Query query = typeFieldMapper.mappedFieldType.distanceFeatureQuery(origin, pivot, boost, context); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public Query existsQuery(QueryShardContext context) { + throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] does not support exist queries"); + } + + @Override + public boolean isAggregatable() { + return false; + } + + private DerivedFieldScript.LeafFactory getDerivedFieldLeafFactory(QueryShardContext context) { + if (!context.documentMapper("").sourceMapper().enabled()) { + throw new IllegalArgumentException( + "DerivedFieldQuery error: unable to fetch fields from _source field: _source is disabled in the mappings " + + "for index [" + + context.index().getName() + + "]" + ); + } + DerivedFieldScript.Factory factory = context.compile(script, DerivedFieldScript.CONTEXT); + return factory.newFactory(script.getParams(), context.lookup()); + } +} diff --git a/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java index c14b2c92c89c3..42b974734e5e7 100644 --- a/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java @@ -218,7 +218,7 @@ protected List> getParameters() { ); } - private KeywordFieldType buildFieldType(BuilderContext context, FieldType fieldType) { + protected KeywordFieldType buildFieldType(BuilderContext 
context, FieldType fieldType) { NamedAnalyzer normalizer = Lucene.KEYWORD_ANALYZER; NamedAnalyzer searchAnalyzer = Lucene.KEYWORD_ANALYZER; String normalizerName = this.normalizer.getValue(); diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 551dd338eed2a..f3a62cdf1436f 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -179,6 +179,7 @@ import org.opensearch.index.warmer.WarmerStats; import org.opensearch.indices.IndexingMemoryController; import org.opensearch.indices.IndicesService; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.indices.cluster.IndicesClusterStateService; import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoveryFailedException; @@ -349,6 +350,7 @@ Runnable getGlobalCheckpointSyncer() { private final List internalRefreshListener = new ArrayList<>(); private final RemoteStoreFileDownloader fileDownloader; private final RecoverySettings recoverySettings; + private final RemoteStoreSettings remoteStoreSettings; /* On source doc rep node, It will be DOCREP_NON_MIGRATING. On source remote node , it will be REMOTE_MIGRATING_SEEDED when relocating from remote node @@ -381,9 +383,9 @@ public IndexShard( @Nullable final SegmentReplicationCheckpointPublisher checkpointPublisher, @Nullable final Store remoteStore, final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory, - final Supplier clusterRemoteTranslogBufferIntervalSupplier, final String nodeId, final RecoverySettings recoverySettings, + final RemoteStoreSettings remoteStoreSettings, boolean seedRemote ) throws IOException { super(shardRouting.shardId(), indexSettings); @@ -405,7 +407,7 @@ public IndexShard( threadPool, this::getEngine, indexSettings.isRemoteNode(), - () -> getRemoteTranslogUploadBufferInterval(clusterRemoteTranslogBufferIntervalSupplier) + () -> getRemoteTranslogUploadBufferInterval(remoteStoreSettings::getClusterRemoteTranslogBufferInterval) ); this.mapperService = mapperService; this.indexCache = indexCache; @@ -481,6 +483,7 @@ public boolean shouldCache(Query query) { : mapperService.documentMapper().mappers().containsTimeStampField(); this.remoteStoreStatsTrackerFactory = remoteStoreStatsTrackerFactory; this.recoverySettings = recoverySettings; + this.remoteStoreSettings = remoteStoreSettings; this.fileDownloader = new RemoteStoreFileDownloader(shardRouting.shardId(), threadPool, recoverySettings); this.shardMigrationState = getShardMigrationState(indexSettings, seedRemote); } @@ -598,6 +601,10 @@ public RecoverySettings getRecoverySettings() { return recoverySettings; } + public RemoteStoreSettings getRemoteStoreSettings() { + return remoteStoreSettings; + } + public RemoteStoreFileDownloader getFileDownloader() { return fileDownloader; } diff --git a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java index fb96102bc6094..351aec6e3af6c 100644 --- a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java +++ b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java @@ -224,7 +224,7 @@ private boolean syncSegments() { // is considered as a first refresh post commit. A cleanup of stale commit files is triggered. // This is done to avoid delete post each refresh. 
if (isRefreshAfterCommit()) { - remoteDirectory.deleteStaleSegmentsAsync(indexShard.getRecoverySettings().getMinRemoteSegmentMetadataFiles()); + remoteDirectory.deleteStaleSegmentsAsync(indexShard.getRemoteStoreSettings().getMinRemoteSegmentMetadataFiles()); } try (GatedCloseable segmentInfosGatedCloseable = indexShard.getSegmentInfosSnapshot()) { diff --git a/server/src/main/java/org/opensearch/indices/DefaultRemoteStoreSettings.java b/server/src/main/java/org/opensearch/indices/DefaultRemoteStoreSettings.java new file mode 100644 index 0000000000000..d3937600a848b --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/DefaultRemoteStoreSettings.java @@ -0,0 +1,26 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices; + +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; + +/** + * Utility to provide a {@link RemoteStoreSettings} instance containing all defaults + * + * @opensearch.internal + */ +public final class DefaultRemoteStoreSettings { + private DefaultRemoteStoreSettings() {} + + public static final RemoteStoreSettings INSTANCE = new RemoteStoreSettings( + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ); +} diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index 9bc81c1826c2d..28ad4b23e319c 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -249,17 +249,6 @@ public class IndicesService extends AbstractLifecycleComponent Property.Final ); - /** - * Used to specify the default translog buffer interval for remote store backed indexes. - */ - public static final Setting CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING = Setting.timeSetting( - "cluster.remote_store.translog.buffer_interval", - IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, - IndexSettings.MINIMUM_REMOTE_TRANSLOG_BUFFER_INTERVAL, - Property.NodeScope, - Property.Dynamic - ); - /** * This setting is used to set the refresh interval when the {@code index.refresh_interval} index setting is not * provided during index creation or when the existing {@code index.refresh_interval} index setting is set as null. 
@@ -366,7 +355,7 @@ public class IndicesService extends AbstractLifecycleComponent private volatile boolean idFieldDataEnabled; private volatile boolean allowExpensiveQueries; private final RecoverySettings recoverySettings; - + private final RemoteStoreSettings remoteStoreSettings; @Nullable private final OpenSearchThreadPoolExecutor danglingIndicesThreadPoolExecutor; private final Set danglingIndicesToWrite = Sets.newConcurrentHashSet(); @@ -375,8 +364,6 @@ public class IndicesService extends AbstractLifecycleComponent private final IndexStorePlugin.DirectoryFactory remoteDirectoryFactory; private final BiFunction translogFactorySupplier; private volatile TimeValue clusterDefaultRefreshInterval; - private volatile TimeValue clusterRemoteTranslogBufferInterval; - private final SearchRequestStats searchRequestStats; @Override @@ -411,7 +398,8 @@ public IndicesService( SearchRequestStats searchRequestStats, @Nullable RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory, RecoverySettings recoverySettings, - CacheService cacheService + CacheService cacheService, + RemoteStoreSettings remoteStoreSettings ) { this.settings = settings; this.threadPool = threadPool; @@ -515,10 +503,8 @@ protected void closeInternal() { this.clusterDefaultRefreshInterval = CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING.get(clusterService.getSettings()); clusterService.getClusterSettings() .addSettingsUpdateConsumer(CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING, this::onRefreshIntervalUpdate); - this.clusterRemoteTranslogBufferInterval = CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.get(clusterService.getSettings()); - clusterService.getClusterSettings() - .addSettingsUpdateConsumer(CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING, this::setClusterRemoteTranslogBufferInterval); this.recoverySettings = recoverySettings; + this.remoteStoreSettings = remoteStoreSettings; } /** @@ -923,8 +909,8 @@ private synchronized IndexService createIndexService( remoteDirectoryFactory, translogFactorySupplier, this::getClusterDefaultRefreshInterval, - this::getClusterRemoteTranslogBufferInterval, - this.recoverySettings + this.recoverySettings, + this.remoteStoreSettings ); } @@ -2044,12 +2030,7 @@ private TimeValue getClusterDefaultRefreshInterval() { return this.clusterDefaultRefreshInterval; } - // Exclusively for testing, please do not use it elsewhere. - public TimeValue getClusterRemoteTranslogBufferInterval() { - return clusterRemoteTranslogBufferInterval; - } - - private void setClusterRemoteTranslogBufferInterval(TimeValue clusterRemoteTranslogBufferInterval) { - this.clusterRemoteTranslogBufferInterval = clusterRemoteTranslogBufferInterval; + public RemoteStoreSettings getRemoteStoreSettings() { + return this.remoteStoreSettings; } } diff --git a/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java b/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java new file mode 100644 index 0000000000000..5e6dba2b398db --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java @@ -0,0 +1,90 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.indices; + +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Setting.Property; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.index.IndexSettings; + +/** + * Settings for remote store + * + * @opensearch.api + */ +@PublicApi(since = "2.14.0") +public class RemoteStoreSettings { + + /** + * Used to specify the default translog buffer interval for remote store backed indexes. + */ + public static final Setting CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING = Setting.timeSetting( + "cluster.remote_store.translog.buffer_interval", + IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, + IndexSettings.MINIMUM_REMOTE_TRANSLOG_BUFFER_INTERVAL, + Property.NodeScope, + Property.Dynamic + ); + + /** + * Controls minimum number of metadata files to keep in remote segment store. + * {@code value < 1} will disable deletion of stale segment metadata files. + */ + public static final Setting CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING = Setting.intSetting( + "cluster.remote_store.index.segment_metadata.retention.max_count", + 10, + -1, + v -> { + if (v == 0) { + throw new IllegalArgumentException( + "Value 0 is not allowed for this setting as it would delete all the data from remote segment store" + ); + } + }, + Property.NodeScope, + Property.Dynamic + ); + + private volatile TimeValue clusterRemoteTranslogBufferInterval; + private volatile int minRemoteSegmentMetadataFiles; + + public RemoteStoreSettings(Settings settings, ClusterSettings clusterSettings) { + this.clusterRemoteTranslogBufferInterval = CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer( + CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING, + this::setClusterRemoteTranslogBufferInterval + ); + + minRemoteSegmentMetadataFiles = CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer( + CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING, + this::setMinRemoteSegmentMetadataFiles + ); + } + + // Exclusively for testing, please do not use it elsewhere. + public TimeValue getClusterRemoteTranslogBufferInterval() { + return clusterRemoteTranslogBufferInterval; + } + + private void setClusterRemoteTranslogBufferInterval(TimeValue clusterRemoteTranslogBufferInterval) { + this.clusterRemoteTranslogBufferInterval = clusterRemoteTranslogBufferInterval; + } + + private void setMinRemoteSegmentMetadataFiles(int minRemoteSegmentMetadataFiles) { + this.minRemoteSegmentMetadataFiles = minRemoteSegmentMetadataFiles; + } + + public int getMinRemoteSegmentMetadataFiles() { + return this.minRemoteSegmentMetadataFiles; + } +} diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java b/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java index 2b41eb125d808..53b42347aa30d 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java @@ -159,25 +159,6 @@ public class RecoverySettings { Property.NodeScope ); - /** - * Controls minimum number of metadata files to keep in remote segment store. - * {@code value < 1} will disable deletion of stale segment metadata files. 
- */ - public static final Setting CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING = Setting.intSetting( - "cluster.remote_store.index.segment_metadata.retention.max_count", - 10, - -1, - v -> { - if (v == 0) { - throw new IllegalArgumentException( - "Value 0 is not allowed for this setting as it would delete all the data from remote segment store" - ); - } - }, - Property.NodeScope, - Property.Dynamic - ); - public static final Setting INDICES_INTERNAL_REMOTE_UPLOAD_TIMEOUT = Setting.timeSetting( "indices.recovery.internal_remote_upload_timeout", new TimeValue(1, TimeUnit.HOURS), @@ -199,7 +180,6 @@ public class RecoverySettings { private volatile TimeValue internalActionTimeout; private volatile TimeValue internalActionRetryTimeout; private volatile TimeValue internalActionLongTimeout; - private volatile int minRemoteSegmentMetadataFiles; private volatile ByteSizeValue chunkSize = DEFAULT_CHUNK_SIZE; private volatile TimeValue internalRemoteUploadTimeout; @@ -243,11 +223,6 @@ public RecoverySettings(Settings settings, ClusterSettings clusterSettings) { this::setInternalActionLongTimeout ); clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING, this::setActivityTimeout); - minRemoteSegmentMetadataFiles = CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.get(settings); - clusterSettings.addSettingsUpdateConsumer( - CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING, - this::setMinRemoteSegmentMetadataFiles - ); clusterSettings.addSettingsUpdateConsumer(INDICES_INTERNAL_REMOTE_UPLOAD_TIMEOUT, this::setInternalRemoteUploadTimeout); } @@ -354,11 +329,4 @@ private void setMaxConcurrentRemoteStoreStreams(int maxConcurrentRemoteStoreStre this.maxConcurrentRemoteStoreStreams = maxConcurrentRemoteStoreStreams; } - private void setMinRemoteSegmentMetadataFiles(int minRemoteSegmentMetadataFiles) { - this.minRemoteSegmentMetadataFiles = minRemoteSegmentMetadataFiles; - } - - public int getMinRemoteSegmentMetadataFiles() { - return this.minRemoteSegmentMetadataFiles; - } } diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index ea449afe1c811..8348b8f02d342 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -153,6 +153,7 @@ import org.opensearch.index.store.remote.filecache.FileCacheFactory; import org.opensearch.indices.IndicesModule; import org.opensearch.indices.IndicesService; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.indices.ShardLimitValidator; import org.opensearch.indices.SystemIndexDescriptor; import org.opensearch.indices.SystemIndices; @@ -788,6 +789,8 @@ protected Node( final RecoverySettings recoverySettings = new RecoverySettings(settings, settingsModule.getClusterSettings()); + final RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(settings, settingsModule.getClusterSettings()); + final IndexStorePlugin.DirectoryFactory remoteDirectoryFactory = new RemoteSegmentStoreDirectoryFactory( repositoriesServiceReference::get, threadPool @@ -825,7 +828,8 @@ protected Node( searchRequestStats, remoteStoreStatsTrackerFactory, recoverySettings, - cacheService + cacheService, + remoteStoreSettings ); final IngestService ingestService = new IngestService( diff --git a/server/src/test/java/org/opensearch/index/IndexModuleTests.java b/server/src/test/java/org/opensearch/index/IndexModuleTests.java index 97bc822be7d51..82d7ab06f126b 100644 
--- a/server/src/test/java/org/opensearch/index/IndexModuleTests.java
+++ b/server/src/test/java/org/opensearch/index/IndexModuleTests.java
@@ -99,6 +99,7 @@
 import org.opensearch.index.translog.InternalTranslogFactory;
 import org.opensearch.index.translog.RemoteBlobStoreInternalTranslogFactory;
 import org.opensearch.index.translog.TranslogFactory;
+import org.opensearch.indices.DefaultRemoteStoreSettings;
 import org.opensearch.indices.IndicesModule;
 import org.opensearch.indices.IndicesQueryCache;
 import org.opensearch.indices.analysis.AnalysisModule;
@@ -261,8 +262,8 @@ private IndexService newIndexService(IndexModule module) throws IOException {
             new RemoteSegmentStoreDirectoryFactory(() -> repositoriesService, threadPool),
             translogFactorySupplier,
             () -> IndexSettings.DEFAULT_REFRESH_INTERVAL,
-            () -> IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL,
-            DefaultRecoverySettings.INSTANCE
+            DefaultRecoverySettings.INSTANCE,
+            DefaultRemoteStoreSettings.INSTANCE
         );
     }
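The tests here and below swap in DefaultRemoteStoreSettings.INSTANCE where production code receives the Node-constructed instance; the buffer-interval supplier argument disappears because the value now travels inside the settings holder. The definition of DefaultRemoteStoreSettings is not part of this excerpt; by analogy with DefaultRecoverySettings it is presumably a trivial holder along these lines (a hypothetical reconstruction, not the shipped source):

    package org.opensearch.indices;

    import org.opensearch.common.settings.ClusterSettings;
    import org.opensearch.common.settings.Settings;

    // Hypothetical: a RemoteStoreSettings built from empty settings, for tests
    // that do not exercise dynamic updates.
    public final class DefaultRemoteStoreSettings {
        private DefaultRemoteStoreSettings() {}

        public static final RemoteStoreSettings INSTANCE = new RemoteStoreSettings(
            Settings.EMPTY,
            new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)
        );
    }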
diff --git a/server/src/test/java/org/opensearch/index/mapper/DerivedFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/DerivedFieldTypeTests.java
new file mode 100644
index 0000000000000..72fb7c88cc478
--- /dev/null
+++ b/server/src/test/java/org/opensearch/index/mapper/DerivedFieldTypeTests.java
@@ -0,0 +1,94 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index.mapper;
+
+import org.apache.lucene.document.DoubleField;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.InetAddressPoint;
+import org.apache.lucene.document.KeywordField;
+import org.apache.lucene.document.LatLonPoint;
+import org.apache.lucene.document.LongField;
+import org.apache.lucene.document.LongPoint;
+import org.opensearch.script.Script;
+
+import java.util.List;
+
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class DerivedFieldTypeTests extends FieldTypeTestCase {
+
+    private DerivedFieldType createDerivedFieldType(String type) {
+        Mapper.BuilderContext context = mock(Mapper.BuilderContext.class);
+        when(context.path()).thenReturn(new ContentPath());
+        return new DerivedFieldType(
+            type + " _derived_field",
+            type,
+            new Script(""),
+            DerivedFieldSupportedTypes.getFieldMapperFromType(type, type + "_derived_field", context),
+            DerivedFieldSupportedTypes.getIndexableFieldGeneratorType(type, type + "_derived_field")
+        );
+    }
+
+    public void testBooleanType() {
+        DerivedFieldType dft = createDerivedFieldType("boolean");
+        assertTrue(dft.typeFieldMapper instanceof BooleanFieldMapper);
+        assertTrue(dft.indexableFieldGenerator.apply(true) instanceof Field);
+        assertTrue(dft.indexableFieldGenerator.apply(false) instanceof Field);
+    }
+
+    public void testDateType() {
+        DerivedFieldType dft = createDerivedFieldType("date");
+        assertTrue(dft.typeFieldMapper instanceof DateFieldMapper);
+        assertTrue(dft.indexableFieldGenerator.apply(System.currentTimeMillis()) instanceof LongPoint);
+        expectThrows(Exception.class, () -> dft.indexableFieldGenerator.apply("blah"));
+    }
+
+    public void testGeoPointType() {
+        DerivedFieldType dft = createDerivedFieldType("geo_point");
+        assertTrue(dft.typeFieldMapper instanceof GeoPointFieldMapper);
+        assertTrue(dft.indexableFieldGenerator.apply(List.of(10.0, 20.0)) instanceof LatLonPoint);
+        expectThrows(ClassCastException.class, () -> dft.indexableFieldGenerator.apply(List.of(10.0)));
+        expectThrows(ClassCastException.class, () -> dft.indexableFieldGenerator.apply(List.of()));
+        expectThrows(ClassCastException.class, () -> dft.indexableFieldGenerator.apply(List.of("10")));
+        expectThrows(ClassCastException.class, () -> dft.indexableFieldGenerator.apply(List.of(10.0, 20.0, 30.0)));
+    }
+
+    public void testIPType() {
+        DerivedFieldType dft = createDerivedFieldType("ip");
+        assertTrue(dft.typeFieldMapper instanceof IpFieldMapper);
+        assertTrue(dft.indexableFieldGenerator.apply("127.0.0.1") instanceof InetAddressPoint);
+        expectThrows(Exception.class, () -> dft.indexableFieldGenerator.apply("blah"));
+    }
+
+    public void testKeywordType() {
+        DerivedFieldType dft = createDerivedFieldType("keyword");
+        assertTrue(dft.typeFieldMapper instanceof KeywordFieldMapper);
+        assertTrue(dft.indexableFieldGenerator.apply("test_keyword") instanceof KeywordField);
+        expectThrows(Exception.class, () -> dft.indexableFieldGenerator.apply(10));
+    }
+
+    public void testLongType() {
+        DerivedFieldType dft = createDerivedFieldType("long");
+        assertTrue(dft.typeFieldMapper instanceof NumberFieldMapper);
+        assertTrue(dft.indexableFieldGenerator.apply(10) instanceof LongField);
+        expectThrows(Exception.class, () -> dft.indexableFieldGenerator.apply(10.0));
+    }
+
+    public void testDoubleType() {
+        DerivedFieldType dft = createDerivedFieldType("double");
+        assertTrue(dft.typeFieldMapper instanceof NumberFieldMapper);
+        assertTrue(dft.indexableFieldGenerator.apply(10.0) instanceof DoubleField);
+        expectThrows(Exception.class, () -> dft.indexableFieldGenerator.apply(""));
+    }
+
+    public void testUnsupportedType() {
+        expectThrows(IllegalArgumentException.class, () -> createDerivedFieldType("match_only_text"));
+    }
+}
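What these tests pin down: each supported derived-field type pairs a concrete FieldMapper with a generator function that turns a script-emitted value into a Lucene IndexableField, and malformed values must fail loudly rather than index garbage. A standalone illustration of the generator shape (the field name and class here are invented for the example, not taken from the PR):

    import java.util.function.Function;

    import org.apache.lucene.document.LongPoint;
    import org.apache.lucene.index.IndexableField;

    public class GeneratorShapeExample {
        public static void main(String[] args) {
            // In the spirit of DerivedFieldSupportedTypes.getIndexableFieldGeneratorType
            // for a long-valued field: accept an Object, emit a Lucene point field.
            Function<Object, IndexableField> generator =
                value -> new LongPoint("example_derived_long", ((Number) value).longValue());

            System.out.println(generator.apply(42L));
            // generator.apply("blah") would throw ClassCastException, which is
            // exactly what the expectThrows assertions above rely on.
        }
    }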
diff --git a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java
index 85878cc2e1c9d..33f6c67b94b3d 100644
--- a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java
+++ b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java
@@ -34,7 +34,7 @@
 import org.opensearch.index.store.RemoteSegmentStoreDirectory.MetadataFilenameUtils;
 import org.opensearch.index.store.Store;
 import org.opensearch.index.store.lockmanager.RemoteStoreLockManager;
-import org.opensearch.indices.recovery.RecoverySettings;
+import org.opensearch.indices.RemoteStoreSettings;
 import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher;
 import org.opensearch.indices.replication.common.ReplicationType;
 import org.opensearch.threadpool.ThreadPool;
@@ -249,7 +249,7 @@ public void testAfterMultipleCommits() throws IOException {
         setup(true, 3);
         assertDocs(indexShard, "1", "2", "3");
 
-        for (int i = 0; i < indexShard.getRecoverySettings().getMinRemoteSegmentMetadataFiles() + 3; i++) {
+        for (int i = 0; i < indexShard.getRemoteStoreSettings().getMinRemoteSegmentMetadataFiles() + 3; i++) {
             indexDocs(4 * (i + 1), 4);
             flushShard(indexShard);
         }
@@ -635,9 +635,9 @@ private Tuple mockIn
         RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory = indexShard.getRemoteStoreStatsTrackerFactory();
         when(shard.indexSettings()).thenReturn(indexShard.indexSettings());
         when(shard.shardId()).thenReturn(indexShard.shardId());
-        RecoverySettings recoverySettings = mock(RecoverySettings.class);
-        when(recoverySettings.getMinRemoteSegmentMetadataFiles()).thenReturn(10);
-        when(shard.getRecoverySettings()).thenReturn(recoverySettings);
+        RemoteStoreSettings remoteStoreSettings = mock(RemoteStoreSettings.class);
+        when(remoteStoreSettings.getMinRemoteSegmentMetadataFiles()).thenReturn(10);
+        when(shard.getRemoteStoreSettings()).thenReturn(remoteStoreSettings);
         RemoteStoreRefreshListener refreshListener = new RemoteStoreRefreshListener(shard, emptyCheckpointPublisher, tracker);
         refreshListener.afterRefresh(true);
         return Tuple.tuple(refreshListener, remoteStoreStatsTrackerFactory);
diff --git a/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java b/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java
index 742dbdeba8c5b..6757dbc184961 100644
--- a/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java
+++ b/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java
@@ -622,6 +622,9 @@ public void testConflictingEngineFactories() {
 
     public void testClusterRemoteTranslogBufferIntervalDefault() {
         IndicesService indicesService = getIndicesService();
-        assertEquals(IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, indicesService.getClusterRemoteTranslogBufferInterval());
+        assertEquals(
+            IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL,
+            indicesService.getRemoteStoreSettings().getClusterRemoteTranslogBufferInterval()
+        );
     }
 }
diff --git a/server/src/test/java/org/opensearch/indices/RemoteStoreSettingsDynamicUpdateTests.java b/server/src/test/java/org/opensearch/indices/RemoteStoreSettingsDynamicUpdateTests.java
new file mode 100644
index 0000000000000..3809a44e55901
--- /dev/null
+++ b/server/src/test/java/org/opensearch/indices/RemoteStoreSettingsDynamicUpdateTests.java
@@ -0,0 +1,69 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.indices;
+
+import org.opensearch.common.settings.ClusterSettings;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.test.OpenSearchTestCase;
+
+public class RemoteStoreSettingsDynamicUpdateTests extends OpenSearchTestCase {
+    private final ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
+    private final RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(Settings.EMPTY, clusterSettings);
+
+    public void testSegmentMetadataRetention() {
+        // Default value
+        assertEquals(10, remoteStoreSettings.getMinRemoteSegmentMetadataFiles());
+
+        // Setting value < default (10)
+        clusterSettings.applySettings(
+            Settings.builder()
+                .put(RemoteStoreSettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), 5)
+                .build()
+        );
+        assertEquals(5, remoteStoreSettings.getMinRemoteSegmentMetadataFiles());
+
+        // Setting min value
+        clusterSettings.applySettings(
+            Settings.builder()
+                .put(RemoteStoreSettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), -1)
+                .build()
+        );
+        assertEquals(-1, remoteStoreSettings.getMinRemoteSegmentMetadataFiles());
+
+        // Setting value > default (10)
+        clusterSettings.applySettings(
+            Settings.builder()
+                .put(RemoteStoreSettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), 15)
+                .build()
+        );
+        assertEquals(15, remoteStoreSettings.getMinRemoteSegmentMetadataFiles());
+
+        // Setting value to 0 should fail and retain the existing value
+        assertThrows(
+            IllegalArgumentException.class,
+            () -> clusterSettings.applySettings(
+                Settings.builder()
+                    .put(RemoteStoreSettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), 0)
+                    .build()
+            )
+        );
+        assertEquals(15, remoteStoreSettings.getMinRemoteSegmentMetadataFiles());
+
+        // Setting value < -1 should fail and retain the existing value
+        assertThrows(
+            IllegalArgumentException.class,
+            () -> clusterSettings.applySettings(
+                Settings.builder()
+                    .put(RemoteStoreSettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), -5)
+                    .build()
+            )
+        );
+        assertEquals(15, remoteStoreSettings.getMinRemoteSegmentMetadataFiles());
+    }
+}
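For completeness: the operator-facing path to the same update is the standard cluster-settings API, and the setting key is unchanged by this refactor; only its owning class moves. A sketch (client wiring omitted; `client` is assumed to be an initialized OpenSearch Client):

    import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
    import org.opensearch.common.settings.Settings;

    ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest();
    request.persistentSettings(
        Settings.builder().put("cluster.remote_store.index.segment_metadata.retention.max_count", 5)
    );
    // client.admin().cluster().updateSettings(request) applies it cluster-wide.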
diff --git a/server/src/test/java/org/opensearch/indices/recovery/RecoverySettingsDynamicUpdateTests.java b/server/src/test/java/org/opensearch/indices/recovery/RecoverySettingsDynamicUpdateTests.java
index 18e7dfb375132..75639661f539d 100644
--- a/server/src/test/java/org/opensearch/indices/recovery/RecoverySettingsDynamicUpdateTests.java
+++ b/server/src/test/java/org/opensearch/indices/recovery/RecoverySettingsDynamicUpdateTests.java
@@ -96,49 +96,4 @@ public void testInternalLongActionTimeout() {
         );
         assertEquals(new TimeValue(duration, timeUnit), recoverySettings.internalActionLongTimeout());
     }
-
-    public void testSegmentMetadataRetention() {
-        // Default value
-        assertEquals(10, recoverySettings.getMinRemoteSegmentMetadataFiles());
-
-        // Setting value < default (10)
-        clusterSettings.applySettings(
-            Settings.builder().put(RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), 5).build()
-        );
-        assertEquals(5, recoverySettings.getMinRemoteSegmentMetadataFiles());
-
-        // Setting min value
-        clusterSettings.applySettings(
-            Settings.builder().put(RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), -1).build()
-        );
-        assertEquals(-1, recoverySettings.getMinRemoteSegmentMetadataFiles());
-
-        // Setting value > default (10)
-        clusterSettings.applySettings(
-            Settings.builder().put(RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), 15).build()
-        );
-        assertEquals(15, recoverySettings.getMinRemoteSegmentMetadataFiles());
-
-        // Setting value to 0 should fail and retain the existing value
-        assertThrows(
-            IllegalArgumentException.class,
-            () -> clusterSettings.applySettings(
-                Settings.builder()
-                    .put(RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), 0)
-                    .build()
-            )
-        );
-        assertEquals(15, recoverySettings.getMinRemoteSegmentMetadataFiles());
-
-        // Setting value < -1 should fail and retain the existing value
-        assertThrows(
-            IllegalArgumentException.class,
-            () -> clusterSettings.applySettings(
-                Settings.builder()
-                    .put(RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), -5)
-                    .build()
-            )
-        );
-        assertEquals(15, recoverySettings.getMinRemoteSegmentMetadataFiles());
-    }
 }
diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java
index e9bbf3d861408..4326e5fc63961 100644
--- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java
+++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java
@@ -192,6 +192,7 @@
 import org.opensearch.index.store.RemoteSegmentStoreDirectoryFactory;
 import org.opensearch.index.store.remote.filecache.FileCache;
 import org.opensearch.index.store.remote.filecache.FileCacheStats;
+import org.opensearch.indices.DefaultRemoteStoreSettings;
 import org.opensearch.indices.IndicesModule;
 import org.opensearch.indices.IndicesService;
 import org.opensearch.indices.ShardLimitValidator;
@@ -2077,7 +2078,8 @@ public void onFailure(final Exception e) {
                         null,
                         new RemoteStoreStatsTrackerFactory(clusterService, settings),
                         DefaultRecoverySettings.INSTANCE,
-                        new CacheModule(new ArrayList<>(), settings).getCacheService()
+                        new CacheModule(new ArrayList<>(), settings).getCacheService(),
+                        DefaultRemoteStoreSettings.INSTANCE
                     );
                     final RecoverySettings recoverySettings = new RecoverySettings(settings, clusterSettings);
                     snapshotShardsService = new SnapshotShardsService(
diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java
index a2f9eb677c0ac..9baa6110ab54e 100644
--- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java
@@ -116,6 +116,7 @@
 import org.opensearch.index.translog.RemoteBlobStoreInternalTranslogFactory;
 import org.opensearch.index.translog.Translog;
 import org.opensearch.index.translog.TranslogFactory;
+import org.opensearch.indices.DefaultRemoteStoreSettings;
 import org.opensearch.indices.IndicesService;
 import org.opensearch.indices.breaker.HierarchyCircuitBreakerService;
 import org.opensearch.indices.recovery.AsyncRecoveryTarget;
@@ -708,9 +709,9 @@ protected IndexShard newShard(
             checkpointPublisher,
             remoteStore,
             remoteStoreStatsTrackerFactory,
-            () -> IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL,
             "dummy-node",
             DefaultRecoverySettings.INSTANCE,
+            DefaultRemoteStoreSettings.INSTANCE,
             false
         );
         indexShard.addShardFailureCallback(DEFAULT_SHARD_FAILURE_HANDLER);