From 1ec5bcb8c88c7672b83764ac3b1592fbabd61803 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 30 Oct 2024 15:13:29 +0800 Subject: [PATCH 01/30] Bump com.google.apis:google-api-services-compute from v1-rev20240407-2.0.0 to v1-rev20241015-2.0.0 in /plugins/discovery-gce (#16502) * Bump com.google.apis:google-api-services-compute Bumps com.google.apis:google-api-services-compute from v1-rev20240407-2.0.0 to v1-rev20241015-2.0.0. --- updated-dependencies: - dependency-name: com.google.apis:google-api-services-compute dependency-type: direct:production ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Signed-off-by: gaobinlong Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] Co-authored-by: gaobinlong --- CHANGELOG.md | 1 + plugins/discovery-gce/build.gradle | 2 +- .../google-api-services-compute-v1-rev20240407-2.0.0.jar.sha1 | 1 - .../google-api-services-compute-v1-rev20241015-2.0.0.jar.sha1 | 1 + 4 files changed, 3 insertions(+), 2 deletions(-) delete mode 100644 plugins/discovery-gce/licenses/google-api-services-compute-v1-rev20240407-2.0.0.jar.sha1 create mode 100644 plugins/discovery-gce/licenses/google-api-services-compute-v1-rev20241015-2.0.0.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 36c8a34a90b62..2b3d5a7327c06 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Switch from `buildSrc/version.properties` to Gradle version catalog (`gradle/libs.versions.toml`) to enable dependabot to perform automated upgrades on common libs ([#16284](https://github.com/opensearch-project/OpenSearch/pull/16284)) ### Dependencies +- Bump `com.google.apis:google-api-services-compute` from v1-rev20240407-2.0.0 to v1-rev20241015-2.0.0 ([#16502](https://github.com/opensearch-project/OpenSearch/pull/16502)) ### Changed diff --git a/plugins/discovery-gce/build.gradle b/plugins/discovery-gce/build.gradle index 76beb78bf533c..4e05544a33f1d 100644 --- a/plugins/discovery-gce/build.gradle +++ b/plugins/discovery-gce/build.gradle @@ -18,7 +18,7 @@ opensearchplugin { } dependencies { - api "com.google.apis:google-api-services-compute:v1-rev20240407-2.0.0" + api "com.google.apis:google-api-services-compute:v1-rev20241015-2.0.0" api "com.google.api-client:google-api-client:1.35.2" api "com.google.oauth-client:google-oauth-client:1.36.0" api "com.google.http-client:google-http-client:${versions.google_http_client}" diff --git a/plugins/discovery-gce/licenses/google-api-services-compute-v1-rev20240407-2.0.0.jar.sha1 b/plugins/discovery-gce/licenses/google-api-services-compute-v1-rev20240407-2.0.0.jar.sha1 deleted file mode 100644 index 834d718641a51..0000000000000 --- a/plugins/discovery-gce/licenses/google-api-services-compute-v1-rev20240407-2.0.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -edf93bc92c9b87fee51aa6c3545b565e58075c05 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/google-api-services-compute-v1-rev20241015-2.0.0.jar.sha1 b/plugins/discovery-gce/licenses/google-api-services-compute-v1-rev20241015-2.0.0.jar.sha1 new file mode 100644 index 0000000000000..1de9a570242fd --- /dev/null +++ b/plugins/discovery-gce/licenses/google-api-services-compute-v1-rev20241015-2.0.0.jar.sha1 @@ -0,0 +1 @@ +83d293916d59ced480e48fd8c0aefb643e27566c \ No 
newline at end of file From f57b8895340341d79908401d8d27b427feaef2e9 Mon Sep 17 00:00:00 2001 From: gaobinlong Date: Wed, 30 Oct 2024 19:49:33 +0800 Subject: [PATCH 02/30] Bump com.azure:azure-storage-common from 12.25.1 to 12.27.1 (#16521) * Bump com.azure:azure-storage-common from 12.25.1 to 12.27.1 Signed-off-by: Gao Binlong * Update changelog Signed-off-by: Gao Binlong --------- Signed-off-by: Gao Binlong Signed-off-by: gaobinlong --- CHANGELOG.md | 1 + plugins/repository-azure/build.gradle | 2 +- .../licenses/azure-storage-common-12.25.1.jar.sha1 | 1 - .../licenses/azure-storage-common-12.27.1.jar.sha1 | 1 + 4 files changed, 3 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-azure/licenses/azure-storage-common-12.25.1.jar.sha1 create mode 100644 plugins/repository-azure/licenses/azure-storage-common-12.27.1.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 2b3d5a7327c06..a16453a369114 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Switch from `buildSrc/version.properties` to Gradle version catalog (`gradle/libs.versions.toml`) to enable dependabot to perform automated upgrades on common libs ([#16284](https://github.com/opensearch-project/OpenSearch/pull/16284)) ### Dependencies +- Bump `com.azure:azure-storage-common` from 12.25.1 to 12.27.1 ([#16521](https://github.com/opensearch-project/OpenSearch/pull/16521)) - Bump `com.google.apis:google-api-services-compute` from v1-rev20240407-2.0.0 to v1-rev20241015-2.0.0 ([#16502](https://github.com/opensearch-project/OpenSearch/pull/16502)) ### Changed diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 3d6b3264d3f60..3c346a21e5566 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -47,7 +47,7 @@ dependencies { api 'com.azure:azure-core:1.51.0' api 'com.azure:azure-json:1.3.0' api 'com.azure:azure-xml:1.1.0' - api 'com.azure:azure-storage-common:12.25.1' + api 'com.azure:azure-storage-common:12.27.1' api 'com.azure:azure-core-http-netty:1.15.5' api "io.netty:netty-codec-dns:${versions.netty}" api "io.netty:netty-codec-socks:${versions.netty}" diff --git a/plugins/repository-azure/licenses/azure-storage-common-12.25.1.jar.sha1 b/plugins/repository-azure/licenses/azure-storage-common-12.25.1.jar.sha1 deleted file mode 100644 index 822a60d81ca27..0000000000000 --- a/plugins/repository-azure/licenses/azure-storage-common-12.25.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -96e2df76ce9a8fa084ae289bb59295d565f2b8d5 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-storage-common-12.27.1.jar.sha1 b/plugins/repository-azure/licenses/azure-storage-common-12.27.1.jar.sha1 new file mode 100644 index 0000000000000..d7602da1418d1 --- /dev/null +++ b/plugins/repository-azure/licenses/azure-storage-common-12.27.1.jar.sha1 @@ -0,0 +1 @@ +c477c5d8c0f2076da1c5345c1097be6a319fe7c4 \ No newline at end of file From 4b284c542707037b4fcc8163db2ab7b14abe8263 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 30 Oct 2024 10:50:08 -0400 Subject: [PATCH 03/30] Bump com.azure:azure-storage-blob from 12.23.0 to 12.28.1 in /plugins/repository-azure (#16501) * Bump com.azure:azure-storage-blob in /plugins/repository-azure Bumps [com.azure:azure-storage-blob](https://github.com/Azure/azure-sdk-for-java) from 12.23.0 to 12.28.1. 
- [Release notes](https://github.com/Azure/azure-sdk-for-java/releases) - [Commits](https://github.com/Azure/azure-sdk-for-java/compare/azure-storage-blob_12.23.0...azure-storage-blob_12.28.1) --- updated-dependencies: - dependency-name: com.azure:azure-storage-blob dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 1 + plugins/repository-azure/build.gradle | 2 +- .../licenses/azure-storage-blob-12.23.0.jar.sha1 | 1 - .../licenses/azure-storage-blob-12.28.1.jar.sha1 | 1 + 4 files changed, 3 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-azure/licenses/azure-storage-blob-12.23.0.jar.sha1 create mode 100644 plugins/repository-azure/licenses/azure-storage-blob-12.28.1.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index a16453a369114..303e2708b6677 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Dependencies - Bump `com.azure:azure-storage-common` from 12.25.1 to 12.27.1 ([#16521](https://github.com/opensearch-project/OpenSearch/pull/16521)) - Bump `com.google.apis:google-api-services-compute` from v1-rev20240407-2.0.0 to v1-rev20241015-2.0.0 ([#16502](https://github.com/opensearch-project/OpenSearch/pull/16502)) +- Bump `com.azure:azure-storage-blob` from 12.23.0 to 12.28.1 ([#16501](https://github.com/opensearch-project/OpenSearch/pull/16501)) ### Changed diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 3c346a21e5566..efcd01d2bad5e 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -56,7 +56,7 @@ dependencies { api "io.netty:netty-resolver-dns:${versions.netty}" api "io.netty:netty-transport-native-unix-common:${versions.netty}" implementation project(':modules:transport-netty4') - api 'com.azure:azure-storage-blob:12.23.0' + api 'com.azure:azure-storage-blob:12.28.1' api 'com.azure:azure-identity:1.13.2' // Start of transitive dependencies for azure-identity api 'com.microsoft.azure:msal4j-persistence-extension:1.3.0' diff --git a/plugins/repository-azure/licenses/azure-storage-blob-12.23.0.jar.sha1 b/plugins/repository-azure/licenses/azure-storage-blob-12.23.0.jar.sha1 deleted file mode 100644 index 5f32d64b00918..0000000000000 --- a/plugins/repository-azure/licenses/azure-storage-blob-12.23.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3eeb49d5109e812343fb436e4bbb2eecac8fe386 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-storage-blob-12.28.1.jar.sha1 b/plugins/repository-azure/licenses/azure-storage-blob-12.28.1.jar.sha1 new file mode 100644 index 0000000000000..95ac42063d36f --- /dev/null +++ b/plugins/repository-azure/licenses/azure-storage-blob-12.28.1.jar.sha1 @@ -0,0 +1 @@ +4f1eef206d0a71e4ef6c5e79578dd1b9c0370869 \ No newline at end of file From 80ca32fe9f99165a322e115605027a4df7695f1e Mon Sep 17 00:00:00 2001 From: Chenyang Ji Date: Thu, 31 Oct 2024 18:32:03 -0700 Subject: [PATCH 04/30] remove resource usages object from headers (#16532) Signed-off-by: Chenyang Ji --- CHANGELOG.md | 1 + .../search/SearchTaskRequestOperationsListener.java | 8 ++++++++ .../opensearch/common/util/concurrent/ThreadContext.java | 9 
+++++++++ .../opensearch/tasks/TaskResourceTrackingService.java | 7 +++++++ 4 files changed, 25 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 303e2708b6677..5209f46229edf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -26,6 +26,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix get index settings API doesn't show `number_of_routing_shards` setting when it was explicitly set ([#16294](https://github.com/opensearch-project/OpenSearch/pull/16294)) - Revert changes to upload remote state manifest using minimum codec version([#16403](https://github.com/opensearch-project/OpenSearch/pull/16403)) - Ensure index templates are not applied to system indices ([#16418](https://github.com/opensearch-project/OpenSearch/pull/16418)) +- Remove resource usages object from search response headers ([#16532](https://github.com/opensearch-project/OpenSearch/pull/16532)) ### Security diff --git a/server/src/main/java/org/opensearch/action/search/SearchTaskRequestOperationsListener.java b/server/src/main/java/org/opensearch/action/search/SearchTaskRequestOperationsListener.java index 4434d71793b23..ee111b563b747 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchTaskRequestOperationsListener.java +++ b/server/src/main/java/org/opensearch/action/search/SearchTaskRequestOperationsListener.java @@ -25,6 +25,14 @@ public SearchTaskRequestOperationsListener(TaskResourceTrackingService taskResou @Override public void onRequestEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { + // Refresh the coordinator node level resource usages taskResourceTrackingService.refreshResourceStats(context.getTask()); + // Remove the shard level resource usages from thread context + taskResourceTrackingService.removeTaskResourceUsage(); + } + + @Override + public void onRequestFailure(SearchPhaseContext context, SearchRequestContext searchRequestContext) { + taskResourceTrackingService.removeTaskResourceUsage(); } } diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java index 070e18481f2a3..75a7ef94978d4 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java @@ -547,6 +547,15 @@ public void updateResponseHeader(final String key, final String value, final Fun threadLocal.set(threadLocal.get().putResponse(key, value, uniqueValue, maxWarningHeaderCount, maxWarningHeaderSize, true)); } + /** + * Remove the {@code value} for the specified {@code key}. + * + * @param key the header name + */ + public void removeResponseHeader(final String key) { + threadLocal.get().responseHeaders.remove(key); + } + /** * Saves the current thread context and wraps command in a Runnable that restores that context before running command. If * command has already been passed through this method then it is returned unaltered rather than wrapped twice. 
diff --git a/server/src/main/java/org/opensearch/tasks/TaskResourceTrackingService.java b/server/src/main/java/org/opensearch/tasks/TaskResourceTrackingService.java index ca1957cdb1633..a184673a8fa2f 100644 --- a/server/src/main/java/org/opensearch/tasks/TaskResourceTrackingService.java +++ b/server/src/main/java/org/opensearch/tasks/TaskResourceTrackingService.java @@ -328,6 +328,13 @@ public void writeTaskResourceUsage(SearchShardTask task, String nodeId) { } } + /** + * Remove the current task level resource usages. + */ + public void removeTaskResourceUsage() { + threadPool.getThreadContext().removeResponseHeader(TASK_RESOURCE_USAGE); + } + /** * Get the task resource usages from {@link ThreadContext} * From 0363aa79c434343dc7f5b8df9f77b2d4652f4c7e Mon Sep 17 00:00:00 2001 From: Peter Alfonsi Date: Thu, 31 Oct 2024 22:54:00 -0700 Subject: [PATCH 05/30] Adds cluster setting to allow caching requests with size>0 in request cache (#16484) * Add cluster setting to allow size>0 in request cache Signed-off-by: Peter Alfonsi * Add to changelog Signed-off-by: Peter Alfonsi * addressed dbwiddis's comments Signed-off-by: Peter Alfonsi * make canCacheSizeNonzeroRequests volatile Signed-off-by: Peter Alfonsi * fix changelog merge Signed-off-by: Peter Alfonsi * Changed setting name Signed-off-by: Peter Alfonsi * more renaming Signed-off-by: Peter Alfonsi * fix spotless check Signed-off-by: Peter Alfonsi * rerun gradle check Signed-off-by: Peter Alfonsi --------- Signed-off-by: Peter Alfonsi Signed-off-by: Peter Alfonsi Co-authored-by: Peter Alfonsi --- CHANGELOG.md | 1 + .../indices/IndicesRequestCacheIT.java | 18 +++++ .../common/settings/ClusterSettings.java | 1 + .../indices/IndicesRequestCache.java | 12 ++++ .../opensearch/indices/IndicesService.java | 16 ++++- .../indices/IndicesServiceTests.java | 69 +++++++++++++++---- 6 files changed, 101 insertions(+), 16 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5209f46229edf..84fd3810ff158 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add logic in master service to optimize performance and retain detailed logging for critical cluster operations. 
([#14795](https://github.com/opensearch-project/OpenSearch/pull/14795)) - Add Setting to adjust the primary constraint weights ([#16471](https://github.com/opensearch-project/OpenSearch/pull/16471)) - Switch from `buildSrc/version.properties` to Gradle version catalog (`gradle/libs.versions.toml`) to enable dependabot to perform automated upgrades on common libs ([#16284](https://github.com/opensearch-project/OpenSearch/pull/16284)) +- Add dynamic setting allowing size > 0 requests to be cached in the request cache ([#16483](https://github.com/opensearch-project/OpenSearch/pull/16483/files)) ### Dependencies - Bump `com.azure:azure-storage-common` from 12.25.1 to 12.27.1 ([#16521](https://github.com/opensearch-project/OpenSearch/pull/16521)) diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java index 557f9e19ee424..bab085bf265af 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java @@ -43,6 +43,7 @@ import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.action.admin.cluster.node.stats.NodeStats; import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.opensearch.action.admin.indices.alias.Alias; import org.opensearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest; import org.opensearch.action.admin.indices.forcemerge.ForceMergeResponse; @@ -89,6 +90,7 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.opensearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING; +import static org.opensearch.indices.IndicesRequestCache.INDICES_REQUEST_CACHE_ENABLE_FOR_ALL_REQUESTS_SETTING; import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.dateHistogram; import static org.opensearch.search.aggregations.AggregationBuilders.dateRange; @@ -579,6 +581,22 @@ public void testCanCache() throws Exception { OpenSearchAssertions.assertAllSuccessful(r4); assertThat(r4.getHits().getTotalHits().value, equalTo(7L)); assertCacheState(client, index, 0, 4); + + // If size > 0 we should cache if this is enabled via cluster setting + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.persistentSettings( + Settings.builder().put(INDICES_REQUEST_CACHE_ENABLE_FOR_ALL_REQUESTS_SETTING.getKey(), true) + ); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + final SearchResponse r7 = client.prepareSearch(index) + .setSearchType(SearchType.QUERY_THEN_FETCH) + .setSize(1) + .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-22").lte("2016-03-26")) + .get(); + OpenSearchAssertions.assertAllSuccessful(r7); + assertThat(r7.getHits().getTotalHits().value, equalTo(5L)); + assertCacheState(client, index, 0, 6); } public void testCacheWithFilteredAlias() throws InterruptedException { diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java 
b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index c1f4e52706465..cac4b3914df5a 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -519,6 +519,7 @@ public void apply(Settings value, Settings current, Settings previous) { IndicesRequestCache.INDICES_CACHE_QUERY_EXPIRE, IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_INTERVAL_SETTING, IndicesRequestCache.INDICES_REQUEST_CACHE_STALENESS_THRESHOLD_SETTING, + IndicesRequestCache.INDICES_REQUEST_CACHE_ENABLE_FOR_ALL_REQUESTS_SETTING, HunspellService.HUNSPELL_LAZY_LOAD, HunspellService.HUNSPELL_IGNORE_CASE, HunspellService.HUNSPELL_DICTIONARY_OPTIONS, diff --git a/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java b/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java index 156fe32ff5809..4dde4445cd483 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java +++ b/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java @@ -147,6 +147,18 @@ public final class IndicesRequestCache implements RemovalListener 0 queries. + * If enabled, fundamentally non-cacheable queries like DFS queries, queries using the `now` keyword, and + * scroll requests are still not cached. + */ + public static final Setting INDICES_REQUEST_CACHE_ENABLE_FOR_ALL_REQUESTS_SETTING = Setting.boolSetting( + "indices.requests.cache.enable_for_all_requests", + false, + Property.NodeScope, + Property.Dynamic + ); + private final static long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(Key.class); private final ConcurrentMap registeredClosedListeners = ConcurrentCollections.newConcurrentMap(); diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index 1c12e8ca17194..1a4c9067939a9 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -205,6 +205,7 @@ import static org.opensearch.index.IndexService.IndexCreationContext.CREATE_INDEX; import static org.opensearch.index.IndexService.IndexCreationContext.METADATA_VERIFICATION; import static org.opensearch.index.query.AbstractQueryBuilder.parseInnerQueryBuilder; +import static org.opensearch.indices.IndicesRequestCache.INDICES_REQUEST_CACHE_ENABLE_FOR_ALL_REQUESTS_SETTING; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteDataAttributePresent; import static org.opensearch.search.SearchService.ALLOW_EXPENSIVE_QUERIES; @@ -360,6 +361,7 @@ public class IndicesService extends AbstractLifecycleComponent private final FileCache fileCache; private final CompositeIndexSettings compositeIndexSettings; private final Consumer replicator; + private volatile boolean requestCachingEnabledForAllQueries; @Override protected void doStart() { @@ -507,6 +509,9 @@ protected void closeInternal() { this.compositeIndexSettings = compositeIndexSettings; this.fileCache = fileCache; this.replicator = replicator; + this.requestCachingEnabledForAllQueries = INDICES_REQUEST_CACHE_ENABLE_FOR_ALL_REQUESTS_SETTING.get(clusterService.getSettings()); + clusterService.getClusterSettings() + .addSettingsUpdateConsumer(INDICES_REQUEST_CACHE_ENABLE_FOR_ALL_REQUESTS_SETTING, this::setRequestCachingEnabledForAllQueries); } public IndicesService( @@ -1746,11 +1751,11 @@ public boolean canCache(ShardSearchRequest request, SearchContext 
context) { IndexSettings settings = context.indexShard().indexSettings(); // if not explicitly set in the request, use the index setting, if not, use the request if (request.requestCache() == null) { - if (settings.getValue(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING) == false) { - return false; - } else if (context.size() != 0) { + if (settings.getValue(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING) == false + || (context.size() > 0 && !requestCachingEnabledForAllQueries)) { // If no request cache query parameter and shard request cache // is enabled in settings don't cache for requests with size > 0 + // unless this is enabled via cluster setting return false; } } else if (request.requestCache() == false) { @@ -2118,4 +2123,9 @@ public RemoteStoreSettings getRemoteStoreSettings() { public CompositeIndexSettings getCompositeIndexSettings() { return this.compositeIndexSettings; } + + // Package-private for testing + void setRequestCachingEnabledForAllQueries(Boolean requestCachingEnabledForAllQueries) { + this.requestCachingEnabledForAllQueries = requestCachingEnabledForAllQueries; + } } diff --git a/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java b/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java index b5350a39e8599..d2250702b48fd 100644 --- a/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java @@ -641,25 +641,68 @@ public void testDirectoryReaderWithoutDelegatingCacheHelperNotCacheable() throws ShardSearchRequest request = mock(ShardSearchRequest.class); when(request.requestCache()).thenReturn(true); - TestSearchContext context = new TestSearchContext(indexService.getBigArrays(), indexService) { - @Override - public SearchType searchType() { - return SearchType.QUERY_THEN_FETCH; - } - }; + TestSearchContext context = getTestContext(indexService, 0); + IndexReader.CacheHelper notDelegatingCacheHelper = mock(IndexReader.CacheHelper.class); + DelegatingCacheHelper delegatingCacheHelper = mock(DelegatingCacheHelper.class); + for (boolean useDelegatingCacheHelper : new boolean[] { true, false }) { + IndexReader.CacheHelper cacheHelper = useDelegatingCacheHelper ? delegatingCacheHelper : notDelegatingCacheHelper; + setupMocksForCanCache(context, cacheHelper); + assertEquals(useDelegatingCacheHelper, indicesService.canCache(request, context)); + } + } + + public void testCanCacheSizeNonzero() { + // Size == 0 requests should always be cacheable (if they pass the other checks). + // Size > 0 requests should only be cacheable if INDICES_REQUEST_CACHE_ENABLE_FOR_ALL_REQUESTS_SETTING is true.
+ + final IndexService indexService = createIndex("test"); + ShardSearchRequest request = mock(ShardSearchRequest.class); + when(request.requestCache()).thenReturn(null); + + TestSearchContext sizeZeroContext = getTestContext(indexService, 0); + TestSearchContext sizeNonzeroContext = getTestContext(indexService, 10); + + // Test for an IndicesService with the default setting value of false + IndicesService indicesService = getIndicesService(); + DelegatingCacheHelper cacheHelper = mock(DelegatingCacheHelper.class); + Map<TestSearchContext, Boolean> expectedResultMap = Map.of(sizeZeroContext, true, sizeNonzeroContext, false); + for (Map.Entry<TestSearchContext, Boolean> entry : expectedResultMap.entrySet()) { + TestSearchContext context = entry.getKey(); + setupMocksForCanCache(context, cacheHelper); + assertEquals(entry.getValue(), indicesService.canCache(request, context)); + } + // Simulate the cluster setting update by manually calling setRequestCachingEnabledForAllQueries + indicesService.setRequestCachingEnabledForAllQueries(true); + expectedResultMap = Map.of(sizeZeroContext, true, sizeNonzeroContext, true); + + for (Map.Entry<TestSearchContext, Boolean> entry : expectedResultMap.entrySet()) { + TestSearchContext context = entry.getKey(); + setupMocksForCanCache(context, cacheHelper); + assertEquals(entry.getValue(), indicesService.canCache(request, context)); + } + } + + private void setupMocksForCanCache(TestSearchContext context, IndexReader.CacheHelper cacheHelper) { ContextIndexSearcher searcher = mock(ContextIndexSearcher.class); context.setSearcher(searcher); DirectoryReader reader = mock(DirectoryReader.class); when(searcher.getDirectoryReader()).thenReturn(reader); when(searcher.getIndexReader()).thenReturn(reader); - IndexReader.CacheHelper notDelegatingCacheHelper = mock(IndexReader.CacheHelper.class); - DelegatingCacheHelper delegatingCacheHelper = mock(DelegatingCacheHelper.class); + when(reader.getReaderCacheHelper()).thenReturn(cacheHelper); + } - for (boolean useDelegatingCacheHelper : new boolean[] { true, false }) { - IndexReader.CacheHelper cacheHelper = useDelegatingCacheHelper ?
delegatingCacheHelper : notDelegatingCacheHelper; - when(reader.getReaderCacheHelper()).thenReturn(cacheHelper); - assertEquals(useDelegatingCacheHelper, indicesService.canCache(request, context)); - } + private TestSearchContext getTestContext(IndexService indexService, int size) { + return new TestSearchContext(indexService.getBigArrays(), indexService) { + @Override + public SearchType searchType() { + return SearchType.QUERY_THEN_FETCH; + } + + @Override + public int size() { + return size; + } + }; } } From a2a01f821760f7f27eaf6d30b6a4daaadda9fec8 Mon Sep 17 00:00:00 2001 From: panguixin Date: Sat, 2 Nov 2024 01:29:04 +0800 Subject: [PATCH 06/30] Support retrieving doc values of unsigned long field (#16543) * Support retrieving doc values of unsigned long field Signed-off-by: panguixin * add test Signed-off-by: panguixin * changelog Signed-off-by: panguixin * randomize test Signed-off-by: panguixin --------- Signed-off-by: panguixin --- CHANGELOG.md | 1 + .../plain/SortedNumericIndexFieldData.java | 23 +++++++++++++++++ .../index/mapper/NumberFieldTypeTests.java | 25 +++++++++++++++++++ 3 files changed, 49 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 84fd3810ff158..edbf7c8ed065c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -28,6 +28,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Revert changes to upload remote state manifest using minimum codec version([#16403](https://github.com/opensearch-project/OpenSearch/pull/16403)) - Ensure index templates are not applied to system indices ([#16418](https://github.com/opensearch-project/OpenSearch/pull/16418)) - Remove resource usages object from search response headers ([#16532](https://github.com/opensearch-project/OpenSearch/pull/16532)) +- Support retrieving doc values of unsigned long field ([#16543](https://github.com/opensearch-project/OpenSearch/pull/16543)) ### Security diff --git a/server/src/main/java/org/opensearch/index/fielddata/plain/SortedNumericIndexFieldData.java b/server/src/main/java/org/opensearch/index/fielddata/plain/SortedNumericIndexFieldData.java index 0019a41e67c02..8a61d86f6f615 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/plain/SortedNumericIndexFieldData.java +++ b/server/src/main/java/org/opensearch/index/fielddata/plain/SortedNumericIndexFieldData.java @@ -41,6 +41,7 @@ import org.apache.lucene.sandbox.document.HalfFloatPoint; import org.apache.lucene.util.Accountable; import org.apache.lucene.util.NumericUtils; +import org.opensearch.common.Numbers; import org.opensearch.common.time.DateUtils; import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.index.fielddata.FieldData; @@ -573,6 +574,28 @@ public final SortedBinaryDocValues getBytesValues() { return FieldData.toUnsignedString(getLongValues()); } + @Override + public DocValueFetcher.Leaf getLeafValueFetcher(DocValueFormat format) { + SortedNumericDocValues values = getLongValues(); + return new DocValueFetcher.Leaf() { + @Override + public boolean advanceExact(int docId) throws IOException { + return values.advanceExact(docId); + } + + @Override + public int docValueCount() { + return values.docValueCount(); + } + + @Override + public Object nextValue() throws IOException { + final BigInteger value = Numbers.toUnsignedBigInteger(values.nextValue()); + return format.format(value); + } + }; + } + @Override public long ramBytesUsed() { return 0L; diff --git a/server/src/test/java/org/opensearch/index/mapper/NumberFieldTypeTests.java 
b/server/src/test/java/org/opensearch/index/mapper/NumberFieldTypeTests.java index b27ef49303205..c06371bed9357 100644 --- a/server/src/test/java/org/opensearch/index/mapper/NumberFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/NumberFieldTypeTests.java @@ -73,6 +73,7 @@ import org.opensearch.index.mapper.NumberFieldMapper.NumberFieldType; import org.opensearch.index.mapper.NumberFieldMapper.NumberType; import org.opensearch.index.query.QueryShardContext; +import org.opensearch.search.DocValueFormat; import org.opensearch.search.MultiValueMode; import org.opensearch.search.query.BitmapDocValuesQuery; import org.junit.Before; @@ -981,4 +982,28 @@ public void testBitmapQuery() throws IOException { NumberFieldType finalFt = ft; assertThrows(IllegalArgumentException.class, () -> finalFt.bitmapQuery(bitmap)); } + + public void testFetchUnsignedLongDocValues() throws IOException { + Directory dir = newDirectory(); + IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(null)); + Document doc = new Document(); + final BigInteger expectedValue = randomUnsignedLong(); + doc.add(new SortedNumericDocValuesField("ul", expectedValue.longValue())); + w.addDocument(doc); + try (DirectoryReader reader = DirectoryReader.open(w)) { + final NumberFieldType ft = new NumberFieldType("ul", NumberType.UNSIGNED_LONG); + IndexNumericFieldData fielddata = (IndexNumericFieldData) ft.fielddataBuilder( + "index", + () -> { throw new UnsupportedOperationException(); } + ).build(null, null); + assertEquals(IndexNumericFieldData.NumericType.UNSIGNED_LONG, fielddata.getNumericType()); + DocValueFetcher.Leaf fetcher = fielddata.load(reader.leaves().get(0)).getLeafValueFetcher(DocValueFormat.UNSIGNED_LONG); + assertTrue(fetcher.advanceExact(0)); + assertEquals(1, fetcher.docValueCount()); + final Object value = fetcher.nextValue(); + assertTrue(value instanceof BigInteger); + assertEquals(expectedValue, value); + } + IOUtils.close(w, dir); + } } From bb131f99f41bb5f98e7b41b4a048c55dbd1af1d4 Mon Sep 17 00:00:00 2001 From: Craig Perkins Date: Mon, 4 Nov 2024 11:30:54 -0500 Subject: [PATCH 07/30] Bump versions in README to 2.19.0 and 2.18.1 (#16554) Signed-off-by: Craig Perkins --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 95fbac7bbecf1..6ae7e12948670 100644 --- a/README.md +++ b/README.md @@ -7,8 +7,8 @@ [![Security Vulnerabilities](https://img.shields.io/github/issues/opensearch-project/OpenSearch/security%20vulnerability?labelColor=red)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"security%20vulnerability") [![Open Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch)](https://github.com/opensearch-project/OpenSearch/issues) [![Open Pull Requests](https://img.shields.io/github/issues-pr/opensearch-project/OpenSearch)](https://github.com/opensearch-project/OpenSearch/pulls) -[![2.18.0 Open Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch/v2.18.0)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"v2.18.0") -[![2.17.2 Open Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch/v2.17.2)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"v2.17.2") +[![2.19.0 Open Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch/v2.19.0)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"v2.19.0") 
+[![2.18.1 Open Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch/v2.18.1)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"v2.18.1") [![3.0.0 Open Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch/v3.0.0)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"v3.0.0") [![GHA gradle check](https://github.com/opensearch-project/OpenSearch/actions/workflows/gradle-check.yml/badge.svg)](https://github.com/opensearch-project/OpenSearch/actions/workflows/gradle-check.yml) [![GHA validate pull request](https://github.com/opensearch-project/OpenSearch/actions/workflows/wrapper.yml/badge.svg)](https://github.com/opensearch-project/OpenSearch/actions/workflows/wrapper.yml) From f32f5c66fa19286881eaed1160798a6883ba7d5d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Nov 2024 12:02:57 -0500 Subject: [PATCH 08/30] Bump org.apache.hadoop:hadoop-minicluster from 3.4.0 to 3.4.1 in /test/fixtures/hdfs-fixture (#16550) * Bump org.apache.hadoop:hadoop-minicluster in /test/fixtures/hdfs-fixture Bumps org.apache.hadoop:hadoop-minicluster from 3.4.0 to 3.4.1. --- updated-dependencies: - dependency-name: org.apache.hadoop:hadoop-minicluster dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 1 + test/fixtures/hdfs-fixture/build.gradle | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index edbf7c8ed065c..52c4d5a0b478a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.azure:azure-storage-common` from 12.25.1 to 12.27.1 ([#16521](https://github.com/opensearch-project/OpenSearch/pull/16521)) - Bump `com.google.apis:google-api-services-compute` from v1-rev20240407-2.0.0 to v1-rev20241015-2.0.0 ([#16502](https://github.com/opensearch-project/OpenSearch/pull/16502)) - Bump `com.azure:azure-storage-blob` from 12.23.0 to 12.28.1 ([#16501](https://github.com/opensearch-project/OpenSearch/pull/16501)) +- Bump `org.apache.hadoop:hadoop-minicluster` from 3.4.0 to 3.4.1 ([#16550](https://github.com/opensearch-project/OpenSearch/pull/16550)) ### Changed diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index d27273f357758..18bcee8b338fc 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -37,7 +37,7 @@ versions << [ ] dependencies { - api("org.apache.hadoop:hadoop-minicluster:3.4.0") { + api("org.apache.hadoop:hadoop-minicluster:3.4.1") { exclude module: 'websocket-client' exclude module: 'jettison' exclude module: 'netty' From 4c35a2b418aa4cf14b8f49d7639d386b96752ae8 Mon Sep 17 00:00:00 2001 From: kkewwei Date: Tue, 5 Nov 2024 05:23:32 +0800 Subject: [PATCH 09/30] fix rollover alias supports restored searchable snapshot index (#16483) Signed-off-by: kkewwei Signed-off-by: kkewwei --- CHANGELOG.md | 1 + .../alias/TransportIndicesAliasesAction.java | 4 +- .../rollover/TransportRolloverAction.java | 13 +- .../put/TransportUpdateSettingsAction.java | 7 +- .../cluster/block/ClusterBlocks.java | 26 ++- 
.../cluster/block/ClusterBlocksTests.java | 177 ++++++++++++++++++ 6 files changed, 215 insertions(+), 13 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 52c4d5a0b478a..bbb30d78aa5d0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -30,6 +30,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Ensure index templates are not applied to system indices ([#16418](https://github.com/opensearch-project/OpenSearch/pull/16418)) - Remove resource usages object from search response headers ([#16532](https://github.com/opensearch-project/OpenSearch/pull/16532)) - Support retrieving doc values of unsigned long field ([#16543](https://github.com/opensearch-project/OpenSearch/pull/16543)) +- Fix rollover alias supports restored searchable snapshot index([#16483](https://github.com/opensearch-project/OpenSearch/pull/16483)) ### Security diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/TransportIndicesAliasesAction.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/TransportIndicesAliasesAction.java index 81cb3102cfcb9..42e02e9e1aff8 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/TransportIndicesAliasesAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/TransportIndicesAliasesAction.java @@ -41,7 +41,7 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ack.ClusterStateUpdateResponse; import org.opensearch.cluster.block.ClusterBlockException; -import org.opensearch.cluster.block.ClusterBlockLevel; +import org.opensearch.cluster.block.ClusterBlocks; import org.opensearch.cluster.metadata.AliasAction; import org.opensearch.cluster.metadata.AliasMetadata; import org.opensearch.cluster.metadata.IndexAbstraction; @@ -123,7 +123,7 @@ protected ClusterBlockException checkBlock(IndicesAliasesRequest request, Cluste for (IndicesAliasesRequest.AliasActions aliasAction : request.aliasActions()) { Collections.addAll(indices, aliasAction.indices()); } - return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indices.toArray(new String[0])); + return ClusterBlocks.indicesWithRemoteSnapshotBlockedException(indices, state); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/indices/rollover/TransportRolloverAction.java b/server/src/main/java/org/opensearch/action/admin/indices/rollover/TransportRolloverAction.java index 3b11a3d82d707..28d1d14655e3b 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/rollover/TransportRolloverAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/rollover/TransportRolloverAction.java @@ -44,7 +44,7 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateUpdateTask; import org.opensearch.cluster.block.ClusterBlockException; -import org.opensearch.cluster.block.ClusterBlockLevel; +import org.opensearch.cluster.block.ClusterBlocks; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.metadata.Metadata; @@ -62,8 +62,10 @@ import org.opensearch.transport.TransportService; import java.io.IOException; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Optional; @@ -127,11 +129,10 @@ protected ClusterBlockException checkBlock(RolloverRequest request, ClusterState 
request.indicesOptions().expandWildcardsClosed() ); - return state.blocks() - .indicesBlockedException( - ClusterBlockLevel.METADATA_WRITE, - indexNameExpressionResolver.concreteIndexNames(state, indicesOptions, request) - ); + return ClusterBlocks.indicesWithRemoteSnapshotBlockedException( + new HashSet<>(Arrays.asList(indexNameExpressionResolver.concreteIndexNames(state, indicesOptions, request))), + state + ); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java index 09cceca52ce23..779b136abef5c 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java @@ -42,7 +42,7 @@ import org.opensearch.cluster.ack.ClusterStateUpdateResponse; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; -import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.block.ClusterBlocks; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.metadata.MetadataUpdateSettingsService; import org.opensearch.cluster.service.ClusterService; @@ -118,9 +118,8 @@ protected ClusterBlockException checkBlock(UpdateSettingsRequest request, Cluste return globalBlock; } if (request.settings().size() == 1 && // we have to allow resetting these settings otherwise users can't unblock an index - IndexMetadata.INDEX_BLOCKS_METADATA_SETTING.exists(request.settings()) - || IndexMetadata.INDEX_READ_ONLY_SETTING.exists(request.settings()) - || IndexMetadata.INDEX_BLOCKS_READ_ONLY_ALLOW_DELETE_SETTING.exists(request.settings())) { + ClusterBlocks.INDEX_DATA_READ_ONLY_BLOCK_SETTINGS.stream() + .anyMatch(booleanSetting -> booleanSetting.exists(request.settings()))) { return null; } diff --git a/server/src/main/java/org/opensearch/cluster/block/ClusterBlocks.java b/server/src/main/java/org/opensearch/cluster/block/ClusterBlocks.java index 615ea18315cd1..c894fa5dce714 100644 --- a/server/src/main/java/org/opensearch/cluster/block/ClusterBlocks.java +++ b/server/src/main/java/org/opensearch/cluster/block/ClusterBlocks.java @@ -33,19 +33,23 @@ package org.opensearch.cluster.block; import org.opensearch.cluster.AbstractDiffable; +import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.Diff; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.MetadataIndexStateService; import org.opensearch.common.Nullable; import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.settings.Setting; import org.opensearch.common.util.set.Sets; import org.opensearch.core.common.io.stream.BufferedChecksumStreamOutput; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.VerifiableWriteable; import org.opensearch.core.rest.RestStatus; +import org.opensearch.index.IndexModule; import java.io.IOException; +import java.util.Collection; import java.util.Collections; import java.util.EnumMap; import java.util.HashMap; @@ -57,6 +61,7 @@ import static java.util.Collections.emptySet; import static java.util.Collections.unmodifiableSet; import static java.util.stream.Collectors.toSet; +import static 
org.opensearch.index.IndexModule.INDEX_STORE_TYPE_SETTING; /** * Represents current cluster level blocks to block dirty operations done against the cluster. @@ -66,7 +71,11 @@ @PublicApi(since = "1.0.0") public class ClusterBlocks extends AbstractDiffable implements VerifiableWriteable { public static final ClusterBlocks EMPTY_CLUSTER_BLOCK = new ClusterBlocks(emptySet(), Map.of()); - + public static final Set> INDEX_DATA_READ_ONLY_BLOCK_SETTINGS = Set.of( + IndexMetadata.INDEX_READ_ONLY_SETTING, + IndexMetadata.INDEX_BLOCKS_METADATA_SETTING, + IndexMetadata.INDEX_BLOCKS_READ_ONLY_ALLOW_DELETE_SETTING + ); private final Set global; private final Map> indicesBlocks; @@ -276,6 +285,21 @@ public ClusterBlockException indicesAllowReleaseResources(String[] indices) { return new ClusterBlockException(indexLevelBlocks); } + public static ClusterBlockException indicesWithRemoteSnapshotBlockedException(Collection concreteIndices, ClusterState state) { + for (String index : concreteIndices) { + if (state.blocks().indexBlocked(ClusterBlockLevel.METADATA_WRITE, index)) { + IndexMetadata indexMeta = state.metadata().index(index); + if (indexMeta != null + && (IndexModule.Type.REMOTE_SNAPSHOT.match(indexMeta.getSettings().get(INDEX_STORE_TYPE_SETTING.getKey())) == false + || ClusterBlocks.INDEX_DATA_READ_ONLY_BLOCK_SETTINGS.stream() + .anyMatch(booleanSetting -> booleanSetting.exists(indexMeta.getSettings())))) { + return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, concreteIndices.toArray(new String[0])); + } + } + } + return null; + } + @Override public String toString() { if (global.isEmpty() && indices().isEmpty()) { diff --git a/server/src/test/java/org/opensearch/cluster/block/ClusterBlocksTests.java b/server/src/test/java/org/opensearch/cluster/block/ClusterBlocksTests.java index 839e831d38b1b..47e3d0cb44cc9 100644 --- a/server/src/test/java/org/opensearch/cluster/block/ClusterBlocksTests.java +++ b/server/src/test/java/org/opensearch/cluster/block/ClusterBlocksTests.java @@ -8,12 +8,40 @@ package org.opensearch.cluster.block; +import com.carrotsearch.randomizedtesting.RandomizedTest; + +import org.opensearch.Version; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.metadata.AliasMetadata; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.Metadata; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; import org.opensearch.core.common.io.stream.BufferedChecksumStreamOutput; import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.index.IndexModule; +import org.opensearch.index.IndexSettings; import org.opensearch.test.OpenSearchTestCase; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.Set; + import static org.opensearch.cluster.block.ClusterBlockTests.randomClusterBlock; +import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_METADATA_BLOCK; +import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_READ_BLOCK; +import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK; +import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_READ_ONLY_BLOCK; +import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_WRITE_BLOCK; +import static 
org.opensearch.cluster.metadata.IndexMetadata.REMOTE_READ_ONLY_ALLOW_DELETE; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_CREATION_DATE; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_VERSION_CREATED; public class ClusterBlocksTests extends OpenSearchTestCase { @@ -52,4 +80,153 @@ public void testWriteVerifiableTo() throws Exception { clusterBlocks2.writeVerifiableTo(checksumOut2); assertEquals(checksumOut.getChecksum(), checksumOut2.getChecksum()); } + + public void testGlobalBlock() { + String index = "test-000001"; + ClusterState.Builder stateBuilder = ClusterState.builder(new ClusterName("cluster")); + Set indices = new HashSet<>(); + indices.add(index); + + // no global blocks + { + stateBuilder.blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK); + ClusterState clusterState = stateBuilder.build(); + clusterState.blocks(); + assertNull(ClusterBlocks.indicesWithRemoteSnapshotBlockedException(indices, clusterState)); + } + + // has global block + { + for (ClusterBlock block : Arrays.asList( + INDEX_READ_ONLY_BLOCK, + INDEX_READ_BLOCK, + INDEX_WRITE_BLOCK, + INDEX_METADATA_BLOCK, + INDEX_READ_ONLY_ALLOW_DELETE_BLOCK, + REMOTE_READ_ONLY_ALLOW_DELETE + )) { + stateBuilder.blocks(ClusterBlocks.builder().addGlobalBlock(block).build()); + ClusterState clusterState = stateBuilder.build(); + clusterState.blocks(); + assertNull(ClusterBlocks.indicesWithRemoteSnapshotBlockedException(indices, clusterState)); + } + } + } + + public void testIndexWithBlock() { + String index = "test-000001"; + Set indices = new HashSet<>(); + indices.add(index); + ClusterState.Builder stateBuilder = ClusterState.builder(new ClusterName("cluster")); + stateBuilder.blocks(ClusterBlocks.builder().addIndexBlock(index, IndexMetadata.INDEX_METADATA_BLOCK)); + stateBuilder.metadata(Metadata.builder().put(createIndexMetadata(index, false, null, null), false)); + ClusterState clusterState = stateBuilder.build(); + clusterState.blocks(); + assertNotNull(ClusterBlocks.indicesWithRemoteSnapshotBlockedException(indices, stateBuilder.build())); + } + + public void testRemoteIndexBlock() { + String remoteIndex = "remote_index"; + Set indices = new HashSet<>(); + indices.add(remoteIndex); + ClusterState.Builder stateBuilder = ClusterState.builder(new ClusterName("cluster")); + + { + IndexMetadata remoteSnapshotIndexMetadata = createIndexMetadata(remoteIndex, true, null, null); + stateBuilder.metadata(Metadata.builder().put(remoteSnapshotIndexMetadata, false)); + stateBuilder.blocks(ClusterBlocks.builder().addBlocks(remoteSnapshotIndexMetadata)); + + ClusterState clusterState = stateBuilder.build(); + assertTrue(clusterState.blocks().hasIndexBlock(remoteIndex, IndexMetadata.REMOTE_READ_ONLY_ALLOW_DELETE)); + clusterState.blocks(); + assertNull(ClusterBlocks.indicesWithRemoteSnapshotBlockedException(indices, clusterState)); + } + + // searchable snapshot index with block + { + Setting setting = RandomizedTest.randomFrom(new ArrayList<>(ClusterBlocks.INDEX_DATA_READ_ONLY_BLOCK_SETTINGS)); + IndexMetadata remoteSnapshotIndexMetadata = createIndexMetadata(remoteIndex, true, null, setting); + stateBuilder.metadata(Metadata.builder().put(remoteSnapshotIndexMetadata, false)); + stateBuilder.blocks(ClusterBlocks.builder().addBlocks(remoteSnapshotIndexMetadata)); + ClusterState clusterState = stateBuilder.build(); + 
clusterState.blocks(); + assertNotNull(ClusterBlocks.indicesWithRemoteSnapshotBlockedException(indices, clusterState)); + } + } + + public void testRemoteIndexWithoutBlock() { + String remoteIndex = "remote_index"; + ClusterState.Builder stateBuilder = ClusterState.builder(new ClusterName("cluster")); + + String alias = "alias1"; + IndexMetadata remoteSnapshotIndexMetadata = createIndexMetadata(remoteIndex, true, alias, null); + String index = "test-000001"; + IndexMetadata indexMetadata = createIndexMetadata(index, false, alias, null); + stateBuilder.metadata(Metadata.builder().put(remoteSnapshotIndexMetadata, false).put(indexMetadata, false)); + + Set indices = new HashSet<>(); + indices.add(remoteIndex); + ClusterState clusterState = stateBuilder.build(); + clusterState.blocks(); + assertNull(ClusterBlocks.indicesWithRemoteSnapshotBlockedException(indices, clusterState)); + } + + public void testRemoteIndexWithIndexBlock() { + String index = "test-000001"; + String remoteIndex = "remote_index"; + String alias = "alias1"; + { + ClusterState.Builder stateBuilder = ClusterState.builder(new ClusterName("cluster")); + IndexMetadata remoteSnapshotIndexMetadata = createIndexMetadata(remoteIndex, true, alias, null); + IndexMetadata indexMetadata = createIndexMetadata(index, false, alias, null); + stateBuilder.metadata(Metadata.builder().put(remoteSnapshotIndexMetadata, false).put(indexMetadata, false)) + .blocks(ClusterBlocks.builder().addBlocks(remoteSnapshotIndexMetadata)); + ClusterState clusterState = stateBuilder.build(); + clusterState.blocks(); + assertNull(ClusterBlocks.indicesWithRemoteSnapshotBlockedException(Collections.singleton(index), clusterState)); + clusterState.blocks(); + assertNull(ClusterBlocks.indicesWithRemoteSnapshotBlockedException(Collections.singleton(remoteIndex), clusterState)); + } + + { + ClusterState.Builder stateBuilder = ClusterState.builder(new ClusterName("cluster")); + Setting setting = RandomizedTest.randomFrom(new ArrayList<>(ClusterBlocks.INDEX_DATA_READ_ONLY_BLOCK_SETTINGS)); + IndexMetadata remoteSnapshotIndexMetadata = createIndexMetadata(remoteIndex, true, alias, setting); + IndexMetadata indexMetadata = createIndexMetadata(index, false, alias, null); + stateBuilder.metadata(Metadata.builder().put(remoteSnapshotIndexMetadata, false).put(indexMetadata, false)) + .blocks(ClusterBlocks.builder().addBlocks(remoteSnapshotIndexMetadata)); + ClusterState clusterState = stateBuilder.build(); + assertNull(ClusterBlocks.indicesWithRemoteSnapshotBlockedException(Collections.singleton(index), clusterState)); + assertNotNull(ClusterBlocks.indicesWithRemoteSnapshotBlockedException(Collections.singleton(remoteIndex), clusterState)); + } + } + + private IndexMetadata createIndexMetadata(String index, boolean isRemoteIndex, String alias, Setting blockSetting) { + IndexMetadata.Builder builder = IndexMetadata.builder(index).settings(createIndexSettingBuilder(isRemoteIndex, blockSetting)); + if (alias != null) { + AliasMetadata.Builder aliasBuilder = AliasMetadata.builder(alias); + return builder.putAlias(aliasBuilder.build()).build(); + } + return builder.build(); + } + + private Settings.Builder createIndexSettingBuilder(boolean isRemoteIndex, Setting blockSetting) { + Settings.Builder builder = Settings.builder() + .put(IndexMetadata.SETTING_INDEX_UUID, "abc") + .put(SETTING_VERSION_CREATED, Version.CURRENT) + .put(SETTING_CREATION_DATE, System.currentTimeMillis()) + .put(SETTING_NUMBER_OF_SHARDS, 1) + .put(SETTING_NUMBER_OF_REPLICAS, 1); + + if (isRemoteIndex) { + 
builder.put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.REMOTE_SNAPSHOT.getSettingsKey()) + .put(IndexSettings.SEARCHABLE_SNAPSHOT_REPOSITORY.getKey(), "repo") + .put(IndexSettings.SEARCHABLE_SNAPSHOT_ID_NAME.getKey(), "snapshot"); + } + if (blockSetting != null) { + builder.put(blockSetting.getKey(), true); + } + + return builder; + } } From b25e10afb9e216c547a59409d909ec1ecae101ec Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Tue, 5 Nov 2024 13:53:35 -0500 Subject: [PATCH 10/30] Ensure support of the transport-nio by security plugin (HTTP) (#16474) * Ensure support of the transport-nio by security plugin (HTTP) Signed-off-by: Andriy Redko * Add header verifier and decompressor support of secure NIO transport variant Signed-off-by: Andriy Redko --------- Signed-off-by: Andriy Redko --- CHANGELOG.md | 1 + .../ssl/SecureNetty4HttpServerTransport.java | 4 +- .../http/netty4/Netty4HttpClient.java | 2 - plugins/transport-nio/build.gradle | 5 +- ...-native-unix-common-4.1.114.Final.jar.sha1 | 1 + .../opensearch/http/nio/NioPipeliningIT.java | 4 +- .../http/nio/HttpReadWriteHandler.java | 39 +- .../org/opensearch/http/nio/NettyAdaptor.java | 20 +- .../http/nio/NioHttpServerTransport.java | 130 +++- .../org/opensearch/http/nio/ssl/SslUtils.java | 48 ++ .../opensearch/http/nio/ssl/package-info.java | 12 + .../transport/nio/NioTransportPlugin.java | 35 ++ .../opensearch/http/nio/NioHttpClient.java | 54 +- .../http/nio/NioHttpServerTransportTests.java | 12 +- .../SecureNioHttpServerTransportTests.java | 558 ++++++++++++++++++ .../src/test/resources/README.txt | 14 + .../src/test/resources/certificate.crt | 22 + .../src/test/resources/certificate.key | 28 + .../reactor/netty4/ReactorHttpClient.java | 4 +- .../SecureHttpTransportSettingsProvider.java | 10 + 20 files changed, 966 insertions(+), 37 deletions(-) create mode 100644 plugins/transport-nio/licenses/netty-transport-native-unix-common-4.1.114.Final.jar.sha1 create mode 100644 plugins/transport-nio/src/main/java/org/opensearch/http/nio/ssl/SslUtils.java create mode 100644 plugins/transport-nio/src/main/java/org/opensearch/http/nio/ssl/package-info.java create mode 100644 plugins/transport-nio/src/test/java/org/opensearch/http/nio/ssl/SecureNioHttpServerTransportTests.java create mode 100644 plugins/transport-nio/src/test/resources/README.txt create mode 100644 plugins/transport-nio/src/test/resources/certificate.crt create mode 100644 plugins/transport-nio/src/test/resources/certificate.key diff --git a/CHANGELOG.md b/CHANGELOG.md index bbb30d78aa5d0..bba62e97a49e0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Added - Latency and Memory allocation improvements to Multi Term Aggregation queries ([#14993](https://github.com/opensearch-project/OpenSearch/pull/14993)) - Add support for restoring from snapshot with search replicas ([#16111](https://github.com/opensearch-project/OpenSearch/pull/16111)) +- Ensure support of the transport-nio by security plugin ([#16474](https://github.com/opensearch-project/OpenSearch/pull/16474)) - Add logic in master service to optimize performance and retain detailed logging for critical cluster operations. 
([#14795](https://github.com/opensearch-project/OpenSearch/pull/14795)) - Add Setting to adjust the primary constraint weights ([#16471](https://github.com/opensearch-project/OpenSearch/pull/16471)) - Switch from `buildSrc/version.properties` to Gradle version catalog (`gradle/libs.versions.toml`) to enable dependabot to perform automated upgrades on common libs ([#16284](https://github.com/opensearch-project/OpenSearch/pull/16284)) diff --git a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransport.java index 978c92870bd75..e3a6dbb4c57b5 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransport.java @@ -67,8 +67,8 @@ * @see SecuritySSLNettyHttpServerTransport */ public class SecureNetty4HttpServerTransport extends Netty4HttpServerTransport { - public static final String REQUEST_HEADER_VERIFIER = "HeaderVerifier"; - public static final String REQUEST_DECOMPRESSOR = "RequestDecompressor"; + public static final String REQUEST_HEADER_VERIFIER = SecureHttpTransportSettingsProvider.REQUEST_HEADER_VERIFIER; + public static final String REQUEST_DECOMPRESSOR = SecureHttpTransportSettingsProvider.REQUEST_DECOMPRESSOR; private static final Logger logger = LogManager.getLogger(SecureNetty4HttpServerTransport.class); private final SecureHttpTransportSettingsProvider secureHttpTransportSettingsProvider; diff --git a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpClient.java b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpClient.java index ef6b67ea44299..cf841f2e24b1e 100644 --- a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpClient.java +++ b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpClient.java @@ -315,13 +315,11 @@ private static class CountDownLatchHandlerHttp2 extends AwaitableChannelInitiali private final CountDownLatch latch; private final Collection content; - private final boolean secure; private Http2SettingsHandler settingsHandler; CountDownLatchHandlerHttp2(final CountDownLatch latch, final Collection content, final boolean secure) { this.latch = latch; this.content = content; - this.secure = secure; } @Override diff --git a/plugins/transport-nio/build.gradle b/plugins/transport-nio/build.gradle index ee557aa0efc79..c0f0150378434 100644 --- a/plugins/transport-nio/build.gradle +++ b/plugins/transport-nio/build.gradle @@ -50,6 +50,7 @@ dependencies { api "io.netty:netty-handler:${versions.netty}" api "io.netty:netty-resolver:${versions.netty}" api "io.netty:netty-transport:${versions.netty}" + api "io.netty:netty-transport-native-unix-common:${versions.netty}" } tasks.named("dependencyLicenses").configure { @@ -151,10 +152,6 @@ thirdPartyAudit { 'io.netty.internal.tcnative.SessionTicketKey', 'io.netty.internal.tcnative.SniHostNameMatcher', - // from io.netty.channel.unix (netty) - 'io.netty.channel.unix.FileDescriptor', - 'io.netty.channel.unix.UnixChannel', - 'reactor.blockhound.BlockHound$Builder', 'reactor.blockhound.integration.BlockHoundIntegration' ) diff --git a/plugins/transport-nio/licenses/netty-transport-native-unix-common-4.1.114.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-native-unix-common-4.1.114.Final.jar.sha1 new 
file mode 100644 index 0000000000000..a80b9e51be74b --- /dev/null +++ b/plugins/transport-nio/licenses/netty-transport-native-unix-common-4.1.114.Final.jar.sha1 @@ -0,0 +1 @@ +d1171bb99411f282068f49d780cedf8c9adeabfd \ No newline at end of file diff --git a/plugins/transport-nio/src/internalClusterTest/java/org/opensearch/http/nio/NioPipeliningIT.java b/plugins/transport-nio/src/internalClusterTest/java/org/opensearch/http/nio/NioPipeliningIT.java index 4f26e8ae65259..c4541e3b1c7d3 100644 --- a/plugins/transport-nio/src/internalClusterTest/java/org/opensearch/http/nio/NioPipeliningIT.java +++ b/plugins/transport-nio/src/internalClusterTest/java/org/opensearch/http/nio/NioPipeliningIT.java @@ -61,8 +61,8 @@ public void testThatNioHttpServerSupportsPipelining() throws Exception { TransportAddress[] boundAddresses = httpServerTransport.boundAddress().boundAddresses(); TransportAddress transportAddress = randomFrom(boundAddresses); - try (NioHttpClient nettyHttpClient = new NioHttpClient()) { - Collection responses = nettyHttpClient.get(transportAddress.address(), requests); + try (NioHttpClient client = NioHttpClient.http()) { + Collection responses = client.get(transportAddress.address(), requests); assertThat(responses, hasSize(5)); Collection opaqueIds = NioHttpClient.returnOpaqueIds(responses); diff --git a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/HttpReadWriteHandler.java b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/HttpReadWriteHandler.java index d44515f3dc727..6438cca9cc33d 100644 --- a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/HttpReadWriteHandler.java +++ b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/HttpReadWriteHandler.java @@ -32,6 +32,7 @@ package org.opensearch.http.nio; +import org.opensearch.common.Nullable; import org.opensearch.common.unit.TimeValue; import org.opensearch.http.HttpHandlingSettings; import org.opensearch.http.HttpPipelinedRequest; @@ -44,6 +45,8 @@ import org.opensearch.nio.TaskScheduler; import org.opensearch.nio.WriteOperation; +import javax.net.ssl.SSLEngine; + import java.io.IOException; import java.util.ArrayList; import java.util.List; @@ -58,6 +61,7 @@ import io.netty.handler.codec.http.HttpObjectAggregator; import io.netty.handler.codec.http.HttpRequestDecoder; import io.netty.handler.codec.http.HttpResponseEncoder; +import io.netty.handler.ssl.SslHandler; public class HttpReadWriteHandler implements NioChannelHandler { @@ -77,6 +81,28 @@ public HttpReadWriteHandler( HttpHandlingSettings settings, TaskScheduler taskScheduler, LongSupplier nanoClock + ) { + this( + nioHttpChannel, + transport, + settings, + taskScheduler, + nanoClock, + null, /* no header verifier */ + new HttpContentDecompressor(), + null /* no SSL/TLS */ + ); + } + + HttpReadWriteHandler( + NioHttpChannel nioHttpChannel, + NioHttpServerTransport transport, + HttpHandlingSettings settings, + TaskScheduler taskScheduler, + LongSupplier nanoClock, + @Nullable ChannelHandler headerVerifier, + ChannelHandler decompressor, + @Nullable SSLEngine sslEngine ) { this.nioHttpChannel = nioHttpChannel; this.transport = transport; @@ -85,6 +111,12 @@ public HttpReadWriteHandler( this.readTimeoutNanos = TimeUnit.MILLISECONDS.toNanos(settings.getReadTimeoutMillis()); List handlers = new ArrayList<>(8); + + SslHandler sslHandler = null; + if (sslEngine != null) { + sslHandler = new SslHandler(sslEngine); + } + HttpRequestDecoder decoder = new HttpRequestDecoder( settings.getMaxInitialLineLength(), settings.getMaxHeaderSize(), @@ 
-92,7 +124,10 @@ public HttpReadWriteHandler( ); decoder.setCumulator(ByteToMessageDecoder.COMPOSITE_CUMULATOR); handlers.add(decoder); - handlers.add(new HttpContentDecompressor()); + if (headerVerifier != null) { + handlers.add(headerVerifier); + } + handlers.add(decompressor); handlers.add(new HttpResponseEncoder()); handlers.add(new HttpObjectAggregator(settings.getMaxContentLength())); if (settings.isCompression()) { @@ -102,7 +137,7 @@ public HttpReadWriteHandler( handlers.add(new NioHttpResponseCreator()); handlers.add(new NioHttpPipeliningHandler(transport.getLogger(), settings.getPipeliningMaxEvents())); - adaptor = new NettyAdaptor(handlers.toArray(new ChannelHandler[0])); + adaptor = new NettyAdaptor(sslHandler, handlers.toArray(new ChannelHandler[0])); adaptor.addCloseListener((v, e) -> nioHttpChannel.close()); } diff --git a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NettyAdaptor.java b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NettyAdaptor.java index 0b7f4ee7646d1..426690b4b696d 100644 --- a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NettyAdaptor.java +++ b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NettyAdaptor.java @@ -33,6 +33,7 @@ package org.opensearch.http.nio; import org.opensearch.ExceptionsHelper; +import org.opensearch.common.Nullable; import org.opensearch.nio.FlushOperation; import org.opensearch.nio.Page; import org.opensearch.nio.WriteOperation; @@ -49,6 +50,7 @@ import io.netty.channel.ChannelOutboundHandlerAdapter; import io.netty.channel.ChannelPromise; import io.netty.channel.embedded.EmbeddedChannel; +import io.netty.handler.ssl.SslHandler; class NettyAdaptor { @@ -56,9 +58,13 @@ class NettyAdaptor { private final LinkedList flushOperations = new LinkedList<>(); NettyAdaptor(ChannelHandler... handlers) { - nettyChannel = new EmbeddedChannel(); - nettyChannel.pipeline().addLast("write_captor", new ChannelOutboundHandlerAdapter() { + this(null, handlers); + } + NettyAdaptor(@Nullable SslHandler sslHandler, ChannelHandler... handlers) { + this.nettyChannel = new EmbeddedChannel(); + + nettyChannel.pipeline().addLast("write_captor", new ChannelOutboundHandlerAdapter() { @Override public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) { // This is a little tricky. The embedded channel will complete the promise once it writes the message @@ -75,12 +81,22 @@ public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) } } }); + if (sslHandler != null) { + nettyChannel.pipeline().addAfter("write_captor", "ssl_handler", sslHandler); + } nettyChannel.pipeline().addLast(handlers); } public void close() throws Exception { assert flushOperations.isEmpty() : "Should close outbound operations before calling close"; + final SslHandler sslHandler = (SslHandler) nettyChannel.pipeline().get("ssl_handler"); + if (sslHandler != null) { + // The nettyChannel.close() or sslHandler.closeOutbound() futures will block indefinitely, + // removing the handler instead from the channel. 
+ nettyChannel.pipeline().remove(sslHandler); + } + ChannelFuture closeFuture = nettyChannel.close(); // This should be safe as we are not a real network channel closeFuture.await(); diff --git a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpServerTransport.java b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpServerTransport.java index ecf9ad9f17f87..9eca5fc87120d 100644 --- a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpServerTransport.java +++ b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpServerTransport.java @@ -36,6 +36,7 @@ import org.apache.logging.log4j.Logger; import org.opensearch.OpenSearchException; import org.opensearch.action.support.PlainActionFuture; +import org.opensearch.common.Nullable; import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; @@ -47,6 +48,8 @@ import org.opensearch.http.AbstractHttpServerTransport; import org.opensearch.http.HttpChannel; import org.opensearch.http.HttpServerChannel; +import org.opensearch.http.HttpServerTransport; +import org.opensearch.http.nio.ssl.SslUtils; import org.opensearch.nio.BytesChannelContext; import org.opensearch.nio.ChannelFactory; import org.opensearch.nio.Config; @@ -56,16 +59,28 @@ import org.opensearch.nio.NioSocketChannel; import org.opensearch.nio.ServerChannelContext; import org.opensearch.nio.SocketChannelContext; +import org.opensearch.plugins.SecureHttpTransportSettingsProvider; import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportAdapterProvider; import org.opensearch.transport.nio.NioGroupFactory; import org.opensearch.transport.nio.PageAllocator; +import javax.net.ssl.SSLEngine; +import javax.net.ssl.SSLException; + import java.io.IOException; import java.net.InetSocketAddress; import java.nio.channels.ServerSocketChannel; import java.nio.channels.SocketChannel; +import java.util.Collections; +import java.util.List; +import java.util.Optional; import java.util.function.Consumer; +import java.util.stream.Collectors; + +import io.netty.channel.ChannelInboundHandlerAdapter; +import io.netty.handler.codec.http.HttpContentDecompressor; import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_MAX_CHUNK_SIZE; import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE; @@ -83,6 +98,9 @@ public class NioHttpServerTransport extends AbstractHttpServerTransport { private static final Logger logger = LogManager.getLogger(NioHttpServerTransport.class); + public static final String REQUEST_HEADER_VERIFIER = SecureHttpTransportSettingsProvider.REQUEST_HEADER_VERIFIER; + public static final String REQUEST_DECOMPRESSOR = SecureHttpTransportSettingsProvider.REQUEST_DECOMPRESSOR; + protected final PageAllocator pageAllocator; private final NioGroupFactory nioGroupFactory; @@ -97,6 +115,34 @@ public class NioHttpServerTransport extends AbstractHttpServerTransport { private volatile NioGroup nioGroup; private ChannelFactory channelFactory; + private final SecureHttpTransportSettingsProvider secureHttpTransportSettingsProvider; + + public NioHttpServerTransport( + Settings settings, + NetworkService networkService, + BigArrays bigArrays, + PageCacheRecycler pageCacheRecycler, + ThreadPool threadPool, + NamedXContentRegistry xContentRegistry, + Dispatcher dispatcher, + NioGroupFactory nioGroupFactory, + ClusterSettings 
clusterSettings, + Tracer tracer + ) { + this( + settings, + networkService, + bigArrays, + pageCacheRecycler, + threadPool, + xContentRegistry, + dispatcher, + nioGroupFactory, + clusterSettings, + null, + tracer + ); + } public NioHttpServerTransport( Settings settings, @@ -108,6 +154,7 @@ public NioHttpServerTransport( Dispatcher dispatcher, NioGroupFactory nioGroupFactory, ClusterSettings clusterSettings, + @Nullable SecureHttpTransportSettingsProvider secureHttpTransportSettingsProvider, Tracer tracer ) { super(settings, networkService, bigArrays, threadPool, xContentRegistry, dispatcher, clusterSettings, tracer); @@ -127,6 +174,7 @@ public NioHttpServerTransport( this.reuseAddress = SETTING_HTTP_TCP_REUSE_ADDRESS.get(settings); this.tcpSendBufferSize = Math.toIntExact(SETTING_HTTP_TCP_SEND_BUFFER_SIZE.get(settings).getBytes()); this.tcpReceiveBufferSize = Math.toIntExact(SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE.get(settings).getBytes()); + this.secureHttpTransportSettingsProvider = secureHttpTransportSettingsProvider; logger.debug( "using max_chunk_size[{}], max_header_size[{}], max_initial_line_length[{}], max_content_length[{}]," @@ -178,8 +226,8 @@ protected HttpServerChannel bind(InetSocketAddress socketAddress) throws IOExcep return httpServerChannel; } - protected ChannelFactory channelFactory() { - return new HttpChannelFactory(); + protected ChannelFactory channelFactory() throws SSLException { + return new HttpChannelFactory(secureHttpTransportSettingsProvider); } protected void acceptChannel(NioSocketChannel socketChannel) { @@ -187,8 +235,11 @@ protected void acceptChannel(NioSocketChannel socketChannel) { } private class HttpChannelFactory extends ChannelFactory { + private final SecureHttpTransportSettingsProvider secureHttpTransportSettingsProvider; + private final ChannelInboundHandlerAdapter headerVerifier; + private final TransportAdapterProvider decompressorProvider; - private HttpChannelFactory() { + private HttpChannelFactory(@Nullable SecureHttpTransportSettingsProvider secureHttpTransportSettingsProvider) { super( tcpNoDelay, tcpKeepAlive, @@ -199,17 +250,85 @@ private HttpChannelFactory() { tcpSendBufferSize, tcpReceiveBufferSize ); + this.secureHttpTransportSettingsProvider = secureHttpTransportSettingsProvider; + + final List headerVerifiers = getHeaderVerifiers(secureHttpTransportSettingsProvider); + final Optional> decompressorProviderOpt = getDecompressorProvider( + secureHttpTransportSettingsProvider + ); + + // There could be multiple request decompressor providers configured, using the first one + decompressorProviderOpt.ifPresent(p -> logger.debug("Using request decompressor provider: {}", p)); + + if (headerVerifiers.size() > 1) { + throw new IllegalArgumentException( + "Cannot have more than one header verifier configured, supplied " + headerVerifiers.size() + ); + } + + this.headerVerifier = headerVerifiers.isEmpty() ? 
null : headerVerifiers.get(0); + this.decompressorProvider = decompressorProviderOpt.orElseGet(() -> new TransportAdapterProvider() { + @Override + public String name() { + return REQUEST_DECOMPRESSOR; + } + + @Override + public Optional create(Settings settings, HttpServerTransport transport, Class adapterClass) { + return Optional.empty(); + } + }); + + } + + private List getHeaderVerifiers( + @Nullable SecureHttpTransportSettingsProvider secureHttpTransportSettingsProvider + ) { + if (secureHttpTransportSettingsProvider == null) { + return Collections.emptyList(); + } + + return secureHttpTransportSettingsProvider.getHttpTransportAdapterProviders(settings) + .stream() + .filter(p -> REQUEST_HEADER_VERIFIER.equalsIgnoreCase(p.name())) + .map(p -> p.create(settings, NioHttpServerTransport.this, ChannelInboundHandlerAdapter.class)) + .filter(Optional::isPresent) + .map(Optional::get) + .collect(Collectors.toList()); + } + + private Optional> getDecompressorProvider( + @Nullable SecureHttpTransportSettingsProvider secureHttpTransportSettingsProvider + ) { + if (secureHttpTransportSettingsProvider == null) { + return Optional.empty(); + } + + return secureHttpTransportSettingsProvider.getHttpTransportAdapterProviders(settings) + .stream() + .filter(p -> REQUEST_DECOMPRESSOR.equalsIgnoreCase(p.name())) + .findFirst(); } @Override - public NioHttpChannel createChannel(NioSelector selector, SocketChannel channel, Config.Socket socketConfig) { + public NioHttpChannel createChannel(NioSelector selector, SocketChannel channel, Config.Socket socketConfig) throws IOException { + SSLEngine engine = null; + if (secureHttpTransportSettingsProvider != null) { + engine = secureHttpTransportSettingsProvider.buildSecureHttpServerEngine(settings, NioHttpServerTransport.this) + .orElseGet(SslUtils::createDefaultServerSSLEngine); + } + NioHttpChannel httpChannel = new NioHttpChannel(channel); HttpReadWriteHandler handler = new HttpReadWriteHandler( httpChannel, NioHttpServerTransport.this, handlingSettings, selector.getTaskScheduler(), - threadPool::relativeTimeInMillis + threadPool::relativeTimeInMillis, + headerVerifier, + decompressorProvider.create(settings, NioHttpServerTransport.this, ChannelInboundHandlerAdapter.class) + .orElseGet(HttpContentDecompressor::new), + engine ); Consumer exceptionHandler = (e) -> onException(httpChannel, e); SocketChannelContext context = new BytesChannelContext( @@ -244,6 +363,5 @@ public NioHttpServerChannel createServerChannel( httpServerChannel.setContext(context); return httpServerChannel; } - } } diff --git a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/ssl/SslUtils.java b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/ssl/SslUtils.java new file mode 100644 index 0000000000000..afd67f9799273 --- /dev/null +++ b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/ssl/SslUtils.java @@ -0,0 +1,48 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ +package org.opensearch.http.nio.ssl; + +import org.opensearch.OpenSearchSecurityException; + +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLEngine; + +import java.security.NoSuchAlgorithmException; + +public class SslUtils { + private static final String[] DEFAULT_SSL_PROTOCOLS = { "TLSv1.3", "TLSv1.2", "TLSv1.1" }; + + private SslUtils() { + + } + + public static SSLEngine createDefaultServerSSLEngine() { + try { + final SSLEngine engine = SSLContext.getDefault().createSSLEngine(); + engine.setEnabledProtocols(DEFAULT_SSL_PROTOCOLS); + engine.setUseClientMode(false); + return engine; + } catch (final NoSuchAlgorithmException ex) { + throw new OpenSearchSecurityException("Unable to initialize default server SSL engine", ex); + } + } + + public static SSLEngine createDefaultClientSSLEngine() { + try { + final SSLEngine engine = SSLContext.getDefault().createSSLEngine(); + engine.setEnabledProtocols(DEFAULT_SSL_PROTOCOLS); + engine.setUseClientMode(true); + return engine; + } catch (final NoSuchAlgorithmException ex) { + throw new OpenSearchSecurityException("Unable to initialize default client SSL engine", ex); + } + } +} diff --git a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/ssl/package-info.java b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/ssl/package-info.java new file mode 100644 index 0000000000000..a67f8247ebd4d --- /dev/null +++ b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/ssl/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * SSL supporting utility classes + */ +package org.opensearch.http.nio.ssl; diff --git a/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransportPlugin.java b/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransportPlugin.java index d4be876867651..7707369b59120 100644 --- a/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransportPlugin.java +++ b/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransportPlugin.java @@ -47,9 +47,11 @@ import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.http.HttpServerTransport; +import org.opensearch.http.HttpServerTransport.Dispatcher; import org.opensearch.http.nio.NioHttpServerTransport; import org.opensearch.plugins.NetworkPlugin; import org.opensearch.plugins.Plugin; +import org.opensearch.plugins.SecureHttpTransportSettingsProvider; import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.Transport; @@ -66,6 +68,7 @@ public class NioTransportPlugin extends Plugin implements NetworkPlugin { public static final String NIO_TRANSPORT_NAME = "nio-transport"; public static final String NIO_HTTP_TRANSPORT_NAME = "nio-http-transport"; + public static final String NIO_SECURE_HTTP_TRANSPORT_NAME = "nio-http-transport-secure"; private static final Logger logger = LogManager.getLogger(NioTransportPlugin.class); @@ -140,6 +143,38 @@ public Map> getHttpTransports( ); } + @Override + public Map> getSecureHttpTransports( + Settings settings, + ThreadPool threadPool, + BigArrays bigArrays, + PageCacheRecycler pageCacheRecycler, + CircuitBreakerService circuitBreakerService, + NamedXContentRegistry xContentRegistry, + 
NetworkService networkService, + Dispatcher dispatcher, + ClusterSettings clusterSettings, + SecureHttpTransportSettingsProvider secureHttpTransportSettingsProvider, + Tracer tracer + ) { + return Collections.singletonMap( + NIO_SECURE_HTTP_TRANSPORT_NAME, + () -> new NioHttpServerTransport( + settings, + networkService, + bigArrays, + pageCacheRecycler, + threadPool, + xContentRegistry, + dispatcher, + getNioGroupFactory(settings), + clusterSettings, + secureHttpTransportSettingsProvider, + tracer + ) + ); + } + private synchronized NioGroupFactory getNioGroupFactory(Settings settings) { NioGroupFactory nioGroupFactory = groupFactory.get(); if (nioGroupFactory != null) { diff --git a/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NioHttpClient.java b/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NioHttpClient.java index 45e51c6855f79..ff878eb55e411 100644 --- a/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NioHttpClient.java +++ b/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NioHttpClient.java @@ -71,6 +71,7 @@ import java.util.function.Consumer; import io.netty.buffer.Unpooled; +import io.netty.buffer.UnpooledByteBufAllocator; import io.netty.channel.ChannelHandler; import io.netty.handler.codec.http.DefaultFullHttpRequest; import io.netty.handler.codec.http.DefaultFullHttpResponse; @@ -83,6 +84,10 @@ import io.netty.handler.codec.http.HttpRequestEncoder; import io.netty.handler.codec.http.HttpResponse; import io.netty.handler.codec.http.HttpResponseDecoder; +import io.netty.handler.ssl.ClientAuth; +import io.netty.handler.ssl.SslContextBuilder; +import io.netty.handler.ssl.SslHandler; +import io.netty.handler.ssl.util.InsecureTrustManagerFactory; import static org.opensearch.common.util.concurrent.OpenSearchExecutors.daemonThreadFactory; import static io.netty.handler.codec.http.HttpHeaderNames.HOST; @@ -92,7 +97,7 @@ /** * Tiny helper to send http requests over nio. */ -class NioHttpClient implements Closeable { +public class NioHttpClient implements Closeable { static Collection returnOpaqueIds(Collection responses) { List list = new ArrayList<>(responses.size()); @@ -105,9 +110,11 @@ static Collection returnOpaqueIds(Collection responses private static final Logger logger = LogManager.getLogger(NioHttpClient.class); private final NioSelectorGroup nioGroup; + private final boolean secure; - NioHttpClient() { + private NioHttpClient(final boolean secure) { try { + this.secure = secure; nioGroup = new NioSelectorGroup( daemonThreadFactory(Settings.EMPTY, "nio-http-client"), 1, @@ -118,6 +125,14 @@ static Collection returnOpaqueIds(Collection responses } } + public static NioHttpClient http() { + return new NioHttpClient(false); + } + + public static NioHttpClient https() { + return new NioHttpClient(true); + } + public Collection get(InetSocketAddress remoteAddress, String... 
uris) throws InterruptedException { Collection requests = new ArrayList<>(uris.length); for (int i = 0; i < uris.length; i++) { @@ -138,7 +153,8 @@ public final FullHttpResponse send(InetSocketAddress remoteAddress, FullHttpRequ public final NioSocketChannel connect(InetSocketAddress remoteAddress) { ChannelFactory factory = new ClientChannelFactory( new CountDownLatch(0), - new ArrayList<>() + new ArrayList<>(), + secure ); try { NioSocketChannel nioSocketChannel = nioGroup.openChannel(remoteAddress, factory); @@ -160,7 +176,7 @@ private synchronized Collection sendRequests(InetSocketAddress final CountDownLatch latch = new CountDownLatch(requests.size()); final Collection content = Collections.synchronizedList(new ArrayList<>(requests.size())); - ChannelFactory factory = new ClientChannelFactory(latch, content); + ChannelFactory factory = new ClientChannelFactory(latch, content, secure); NioSocketChannel nioSocketChannel = null; try { @@ -196,8 +212,9 @@ private class ClientChannelFactory extends ChannelFactory content; + private final boolean secure; - private ClientChannelFactory(CountDownLatch latch, Collection content) { + private ClientChannelFactory(CountDownLatch latch, Collection content, final boolean secure) { super( NetworkService.TCP_NO_DELAY.get(Settings.EMPTY), NetworkService.TCP_KEEP_ALIVE.get(Settings.EMPTY), @@ -210,12 +227,14 @@ private ClientChannelFactory(CountDownLatch latch, Collection ); this.latch = latch; this.content = content; + this.secure = secure; } @Override - public NioSocketChannel createChannel(NioSelector selector, java.nio.channels.SocketChannel channel, Config.Socket socketConfig) { + public NioSocketChannel createChannel(NioSelector selector, java.nio.channels.SocketChannel channel, Config.Socket socketConfig) + throws IOException { NioSocketChannel nioSocketChannel = new NioSocketChannel(channel); - HttpClientHandler handler = new HttpClientHandler(nioSocketChannel, latch, content); + HttpClientHandler handler = new HttpClientHandler(nioSocketChannel, latch, content, secure); Consumer exceptionHandler = (e) -> { latch.countDown(); onException(e); @@ -249,17 +268,34 @@ private static class HttpClientHandler implements NioChannelHandler { private final CountDownLatch latch; private final Collection content; - private HttpClientHandler(NioSocketChannel channel, CountDownLatch latch, Collection content) { + private HttpClientHandler( + NioSocketChannel channel, + CountDownLatch latch, + Collection content, + final boolean secure + ) throws IOException { this.latch = latch; this.content = content; final int maxContentLength = Math.toIntExact(new ByteSizeValue(100, ByteSizeUnit.MB).getBytes()); List handlers = new ArrayList<>(5); + + SslHandler sslHandler = null; + if (secure) { + sslHandler = new SslHandler( + SslContextBuilder.forClient() + .clientAuth(ClientAuth.NONE) + .trustManager(InsecureTrustManagerFactory.INSTANCE) + .build() + .newEngine(UnpooledByteBufAllocator.DEFAULT) + ); + } + handlers.add(new HttpResponseDecoder()); handlers.add(new HttpRequestEncoder()); handlers.add(new HttpContentDecompressor()); handlers.add(new HttpObjectAggregator(maxContentLength)); - adaptor = new NettyAdaptor(handlers.toArray(new ChannelHandler[0])); + adaptor = new NettyAdaptor(sslHandler, handlers.toArray(new ChannelHandler[0])); adaptor.addCloseListener((v, e) -> channel.close()); } diff --git a/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NioHttpServerTransportTests.java 
b/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NioHttpServerTransportTests.java index 09594673de5b2..61b42f2a6b77a 100644 --- a/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NioHttpServerTransportTests.java +++ b/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NioHttpServerTransportTests.java @@ -193,7 +193,7 @@ public void dispatchBadRequest(RestChannel channel, ThreadContext threadContext, ) { transport.start(); final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); - try (NioHttpClient client = new NioHttpClient()) { + try (NioHttpClient client = NioHttpClient.http()) { final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/"); request.headers().set(HttpHeaderNames.EXPECT, expectation); HttpUtil.setContentLength(request, contentLength); @@ -310,7 +310,7 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); // Test pre-flight request - try (NioHttpClient client = new NioHttpClient()) { + try (NioHttpClient client = NioHttpClient.http()) { final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.OPTIONS, "/"); request.headers().add(CorsHandler.ORIGIN, "test-cors.org"); request.headers().add(CorsHandler.ACCESS_CONTROL_REQUEST_METHOD, "POST"); @@ -327,7 +327,7 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th } // Test short-circuited request - try (NioHttpClient client = new NioHttpClient()) { + try (NioHttpClient client = NioHttpClient.http()) { final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"); request.headers().add(CorsHandler.ORIGIN, "google.com"); @@ -384,7 +384,7 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th transport.start(); final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); - try (NioHttpClient client = new NioHttpClient()) { + try (NioHttpClient client = NioHttpClient.http()) { DefaultFullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, url); request.headers().add(HttpHeaderNames.ACCEPT_ENCODING, randomFrom("deflate", "gzip")); final FullHttpResponse response = client.send(remoteAddress.address(), request); @@ -451,7 +451,7 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th transport.start(); final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); - try (NioHttpClient client = new NioHttpClient()) { + try (NioHttpClient client = NioHttpClient.http()) { final String url = "/" + new String(new byte[maxInitialLineLength], Charset.forName("UTF-8")); final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, url); @@ -514,7 +514,7 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th transport.start(); final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); - try (NioHttpClient client = new NioHttpClient()) { + try (NioHttpClient client = NioHttpClient.http()) { NioSocketChannel channel = null; try { CountDownLatch channelClosedLatch = new CountDownLatch(1); diff --git a/plugins/transport-nio/src/test/java/org/opensearch/http/nio/ssl/SecureNioHttpServerTransportTests.java 
b/plugins/transport-nio/src/test/java/org/opensearch/http/nio/ssl/SecureNioHttpServerTransportTests.java new file mode 100644 index 0000000000000..1adfe0370344c --- /dev/null +++ b/plugins/transport-nio/src/test/java/org/opensearch/http/nio/ssl/SecureNioHttpServerTransportTests.java @@ -0,0 +1,558 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.http.nio.ssl; + +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.OpenSearchException; +import org.opensearch.common.network.NetworkAddress; +import org.opensearch.common.network.NetworkService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.MockBigArrays; +import org.opensearch.common.util.MockPageCacheRecycler; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.http.BindHttpException; +import org.opensearch.http.CorsHandler; +import org.opensearch.http.HttpServerTransport; +import org.opensearch.http.HttpTransportSettings; +import org.opensearch.http.NullDispatcher; +import org.opensearch.http.nio.NioHttpClient; +import org.opensearch.http.nio.NioHttpServerTransport; +import org.opensearch.nio.NioSocketChannel; +import org.opensearch.plugins.SecureHttpTransportSettingsProvider; +import org.opensearch.plugins.TransportExceptionHandler; +import org.opensearch.rest.BytesRestResponse; +import org.opensearch.rest.RestChannel; +import org.opensearch.rest.RestRequest; +import org.opensearch.telemetry.tracing.noop.NoopTracer; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.rest.FakeRestRequest; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.nio.NioGroupFactory; +import org.junit.After; +import org.junit.Before; + +import javax.net.ssl.SSLEngine; +import javax.net.ssl.SSLException; + +import java.io.IOException; +import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; +import java.util.Collections; +import java.util.Optional; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; + +import io.netty.buffer.ByteBufUtil; +import io.netty.buffer.Unpooled; +import io.netty.buffer.UnpooledByteBufAllocator; +import io.netty.handler.codec.TooLongFrameException; +import io.netty.handler.codec.http.DefaultFullHttpRequest; +import io.netty.handler.codec.http.FullHttpRequest; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpHeaderNames; +import io.netty.handler.codec.http.HttpHeaderValues; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpResponseStatus; +import io.netty.handler.codec.http.HttpUtil; +import io.netty.handler.codec.http.HttpVersion; +import io.netty.handler.ssl.SslContextBuilder; +import io.netty.handler.ssl.util.InsecureTrustManagerFactory; + +import static 
org.opensearch.core.rest.RestStatus.BAD_REQUEST; +import static org.opensearch.core.rest.RestStatus.OK; +import static org.opensearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN; +import static org.opensearch.http.HttpTransportSettings.SETTING_CORS_ENABLED; +import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; + +/** + * Tests for the secure {@link NioHttpServerTransport} class. + */ +public class SecureNioHttpServerTransportTests extends OpenSearchTestCase { + + private NetworkService networkService; + private ThreadPool threadPool; + private MockBigArrays bigArrays; + private MockPageCacheRecycler pageRecycler; + private ClusterSettings clusterSettings; + private SecureHttpTransportSettingsProvider secureHttpTransportSettingsProvider; + + @Before + public void setup() throws Exception { + networkService = new NetworkService(Collections.emptyList()); + threadPool = new TestThreadPool("test"); + pageRecycler = new MockPageCacheRecycler(Settings.EMPTY); + bigArrays = new MockBigArrays(pageRecycler, new NoneCircuitBreakerService()); + clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + + secureHttpTransportSettingsProvider = new SecureHttpTransportSettingsProvider() { + @Override + public Optional buildHttpServerExceptionHandler(Settings settings, HttpServerTransport transport) { + return Optional.empty(); + } + + @Override + public Optional buildSecureHttpServerEngine(Settings settings, HttpServerTransport transport) throws SSLException { + try { + SSLEngine engine = SslContextBuilder.forServer( + SecureNioHttpServerTransportTests.class.getResourceAsStream("/certificate.crt"), + SecureNioHttpServerTransportTests.class.getResourceAsStream("/certificate.key") + ).trustManager(InsecureTrustManagerFactory.INSTANCE).build().newEngine(UnpooledByteBufAllocator.DEFAULT); + return Optional.of(engine); + } catch (final IOException ex) { + throw new SSLException(ex); + } + } + }; + } + + @After + public void shutdown() throws Exception { + if (threadPool != null) { + threadPool.shutdownNow(); + } + threadPool = null; + networkService = null; + bigArrays = null; + clusterSettings = null; + } + + /** + * Test that {@link NioHttpServerTransport} supports the "Expect: 100-continue" HTTP header + * @throws InterruptedException if the client communication with the server is interrupted + */ + public void testExpectContinueHeader() throws InterruptedException { + final Settings settings = createSettings(); + final int contentLength = randomIntBetween(1, HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH.get(settings).bytesAsInt()); + runExpectHeaderTest(settings, HttpHeaderValues.CONTINUE.toString(), contentLength, HttpResponseStatus.CONTINUE); + } + + /** + * Test that {@link NioHttpServerTransport} responds to a + * 100-continue expectation with too large a content-length + * with a 413 status. 
+ * @throws InterruptedException if the client communication with the server is interrupted + */ + public void testExpectContinueHeaderContentLengthTooLong() throws InterruptedException { + final String key = HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH.getKey(); + final int maxContentLength = randomIntBetween(1, 104857600); + final Settings settings = createBuilderWithPort().put(key, maxContentLength + "b").build(); + final int contentLength = randomIntBetween(maxContentLength + 1, Integer.MAX_VALUE); + runExpectHeaderTest(settings, HttpHeaderValues.CONTINUE.toString(), contentLength, HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE); + } + + /** + * Test that {@link NioHttpServerTransport} responds to an unsupported expectation with a 417 status. + * @throws InterruptedException if the client communication with the server is interrupted + */ + public void testExpectUnsupportedExpectation() throws InterruptedException { + Settings settings = createSettings(); + runExpectHeaderTest(settings, "chocolate=yummy", 0, HttpResponseStatus.EXPECTATION_FAILED); + } + + private void runExpectHeaderTest( + final Settings settings, + final String expectation, + final int contentLength, + final HttpResponseStatus expectedStatus + ) throws InterruptedException { + final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { + @Override + public void dispatchRequest(RestRequest request, RestChannel channel, ThreadContext threadContext) { + channel.sendResponse(new BytesRestResponse(OK, BytesRestResponse.TEXT_CONTENT_TYPE, new BytesArray("done"))); + } + + @Override + public void dispatchBadRequest(RestChannel channel, ThreadContext threadContext, Throwable cause) { + logger.error( + new ParameterizedMessage("--> Unexpected bad request [{}]", FakeRestRequest.requestToString(channel.request())), + cause + ); + throw new AssertionError(); + } + }; + try ( + NioHttpServerTransport transport = new NioHttpServerTransport( + settings, + networkService, + bigArrays, + pageRecycler, + threadPool, + xContentRegistry(), + dispatcher, + new NioGroupFactory(settings, logger), + clusterSettings, + secureHttpTransportSettingsProvider, + NoopTracer.INSTANCE + ) + ) { + transport.start(); + final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); + try (NioHttpClient client = NioHttpClient.https()) { + final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/"); + request.headers().set(HttpHeaderNames.EXPECT, expectation); + HttpUtil.setContentLength(request, contentLength); + + final FullHttpResponse response = client.send(remoteAddress.address(), request); + try { + assertThat(response.status(), equalTo(expectedStatus)); + if (expectedStatus.equals(HttpResponseStatus.CONTINUE)) { + final FullHttpRequest continuationRequest = new DefaultFullHttpRequest( + HttpVersion.HTTP_1_1, + HttpMethod.POST, + "/", + Unpooled.EMPTY_BUFFER + ); + final FullHttpResponse continuationResponse = client.send(remoteAddress.address(), continuationRequest); + try { + assertThat(continuationResponse.status(), is(HttpResponseStatus.OK)); + assertThat( + new String(ByteBufUtil.getBytes(continuationResponse.content()), StandardCharsets.UTF_8), + is("done") + ); + } finally { + continuationResponse.release(); + } + } + } finally { + response.release(); + } + } + } + } + + public void testBindUnavailableAddress() { + Settings initialSettings = createSettings(); + try ( + NioHttpServerTransport transport = new NioHttpServerTransport( + 
initialSettings, + networkService, + bigArrays, + pageRecycler, + threadPool, + xContentRegistry(), + new NullDispatcher(), + new NioGroupFactory(Settings.EMPTY, logger), + clusterSettings, + secureHttpTransportSettingsProvider, + NoopTracer.INSTANCE + ) + ) { + transport.start(); + TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); + Settings settings = Settings.builder() + .put("http.port", remoteAddress.getPort()) + .put("network.host", remoteAddress.getAddress()) + .build(); + try ( + NioHttpServerTransport otherTransport = new NioHttpServerTransport( + settings, + networkService, + bigArrays, + pageRecycler, + threadPool, + xContentRegistry(), + new NullDispatcher(), + new NioGroupFactory(Settings.EMPTY, logger), + clusterSettings, + secureHttpTransportSettingsProvider, + NoopTracer.INSTANCE + ) + ) { + BindHttpException bindHttpException = expectThrows(BindHttpException.class, otherTransport::start); + assertEquals("Failed to bind to " + NetworkAddress.format(remoteAddress.address()), bindHttpException.getMessage()); + } + } + } + + public void testCorsRequest() throws InterruptedException { + final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { + + @Override + public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) { + logger.error("--> Unexpected successful request [{}]", FakeRestRequest.requestToString(request)); + throw new AssertionError(); + } + + @Override + public void dispatchBadRequest(final RestChannel channel, final ThreadContext threadContext, final Throwable cause) { + logger.error( + new ParameterizedMessage("--> Unexpected bad request [{}]", FakeRestRequest.requestToString(channel.request())), + cause + ); + throw new AssertionError(); + } + + }; + + final Settings settings = createBuilderWithPort().put(SETTING_CORS_ENABLED.getKey(), true) + .put(SETTING_CORS_ALLOW_ORIGIN.getKey(), "test-cors.org") + .build(); + + try ( + NioHttpServerTransport transport = new NioHttpServerTransport( + settings, + networkService, + bigArrays, + pageRecycler, + threadPool, + xContentRegistry(), + dispatcher, + new NioGroupFactory(settings, logger), + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + secureHttpTransportSettingsProvider, + NoopTracer.INSTANCE + ) + ) { + transport.start(); + final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); + + // Test pre-flight request + try (NioHttpClient client = NioHttpClient.https()) { + final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.OPTIONS, "/"); + request.headers().add(CorsHandler.ORIGIN, "test-cors.org"); + request.headers().add(CorsHandler.ACCESS_CONTROL_REQUEST_METHOD, "POST"); + + final FullHttpResponse response = client.send(remoteAddress.address(), request); + try { + assertThat(response.status(), equalTo(HttpResponseStatus.OK)); + assertThat(response.headers().get(CorsHandler.ACCESS_CONTROL_ALLOW_ORIGIN), equalTo("test-cors.org")); + assertThat(response.headers().get(CorsHandler.VARY), equalTo(CorsHandler.ORIGIN)); + assertTrue(response.headers().contains(CorsHandler.DATE)); + } finally { + response.release(); + } + } + + // Test short-circuited request + try (NioHttpClient client = NioHttpClient.https()) { + final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"); + request.headers().add(CorsHandler.ORIGIN, "google.com"); + + final 
FullHttpResponse response = client.send(remoteAddress.address(), request); + try { + assertThat(response.status(), equalTo(HttpResponseStatus.FORBIDDEN)); + } finally { + response.release(); + } + } + } + } + + public void testLargeCompressedResponse() throws InterruptedException { + final String responseString = randomAlphaOfLength(4 * 1024 * 1024); + final String url = "/thing"; + final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { + + @Override + public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) { + if (url.equals(request.uri())) { + channel.sendResponse(new BytesRestResponse(OK, responseString)); + } else { + logger.error("--> Unexpected successful uri [{}]", request.uri()); + throw new AssertionError(); + } + } + + @Override + public void dispatchBadRequest(final RestChannel channel, final ThreadContext threadContext, final Throwable cause) { + logger.error( + new ParameterizedMessage("--> Unexpected bad request [{}]", FakeRestRequest.requestToString(channel.request())), + cause + ); + throw new AssertionError(); + } + + }; + + try ( + NioHttpServerTransport transport = new NioHttpServerTransport( + Settings.EMPTY, + networkService, + bigArrays, + pageRecycler, + threadPool, + xContentRegistry(), + dispatcher, + new NioGroupFactory(Settings.EMPTY, logger), + clusterSettings, + secureHttpTransportSettingsProvider, + NoopTracer.INSTANCE + ) + ) { + transport.start(); + final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); + + try (NioHttpClient client = NioHttpClient.https()) { + DefaultFullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, url); + request.headers().add(HttpHeaderNames.ACCEPT_ENCODING, randomFrom("deflate", "gzip")); + final FullHttpResponse response = client.send(remoteAddress.address(), request); + try { + assertThat(response.status(), equalTo(HttpResponseStatus.OK)); + byte[] bytes = new byte[response.content().readableBytes()]; + response.content().readBytes(bytes); + assertThat(new String(bytes, StandardCharsets.UTF_8), equalTo(responseString)); + } finally { + response.release(); + } + } + } + } + + public void testBadRequest() throws InterruptedException { + final AtomicReference causeReference = new AtomicReference<>(); + final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { + + @Override + public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) { + logger.error("--> Unexpected successful request [{}]", FakeRestRequest.requestToString(request)); + throw new AssertionError(); + } + + @Override + public void dispatchBadRequest(final RestChannel channel, final ThreadContext threadContext, final Throwable cause) { + causeReference.set(cause); + try { + final OpenSearchException e = new OpenSearchException("you sent a bad request and you should feel bad"); + channel.sendResponse(new BytesRestResponse(channel, BAD_REQUEST, e)); + } catch (final IOException e) { + throw new AssertionError(e); + } + } + + }; + + final Settings settings; + final int maxInitialLineLength; + final Setting httpMaxInitialLineLengthSetting = HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH; + if (randomBoolean()) { + maxInitialLineLength = httpMaxInitialLineLengthSetting.getDefault(Settings.EMPTY).bytesAsInt(); + settings = createSettings(); + } else { + maxInitialLineLength = randomIntBetween(1, 8192); + 
settings = createBuilderWithPort().put(httpMaxInitialLineLengthSetting.getKey(), maxInitialLineLength + "b").build(); + } + + try ( + NioHttpServerTransport transport = new NioHttpServerTransport( + settings, + networkService, + bigArrays, + pageRecycler, + threadPool, + xContentRegistry(), + dispatcher, + new NioGroupFactory(settings, logger), + clusterSettings, + secureHttpTransportSettingsProvider, + NoopTracer.INSTANCE + ) + ) { + transport.start(); + final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); + + try (NioHttpClient client = NioHttpClient.https()) { + final String url = "/" + new String(new byte[maxInitialLineLength], Charset.forName("UTF-8")); + final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, url); + + final FullHttpResponse response = client.send(remoteAddress.address(), request); + try { + assertThat(response.status(), equalTo(HttpResponseStatus.BAD_REQUEST)); + assertThat( + new String(response.content().array(), Charset.forName("UTF-8")), + containsString("you sent a bad request and you should feel bad") + ); + } finally { + response.release(); + } + } + } + + assertNotNull(causeReference.get()); + assertThat(causeReference.get(), instanceOf(TooLongFrameException.class)); + } + + public void testReadTimeout() throws Exception { + final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { + + @Override + public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) { + logger.error("--> Unexpected successful request [{}]", FakeRestRequest.requestToString(request)); + throw new AssertionError("Should not have received a dispatched request"); + } + + @Override + public void dispatchBadRequest(final RestChannel channel, final ThreadContext threadContext, final Throwable cause) { + logger.error( + new ParameterizedMessage("--> Unexpected bad request [{}]", FakeRestRequest.requestToString(channel.request())), + cause + ); + throw new AssertionError("Should not have received a dispatched request"); + } + + }; + + Settings settings = createBuilderWithPort().put( + HttpTransportSettings.SETTING_HTTP_READ_TIMEOUT.getKey(), + new TimeValue(randomIntBetween(100, 300)) + ).build(); + + try ( + NioHttpServerTransport transport = new NioHttpServerTransport( + settings, + networkService, + bigArrays, + pageRecycler, + threadPool, + xContentRegistry(), + dispatcher, + new NioGroupFactory(settings, logger), + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + secureHttpTransportSettingsProvider, + NoopTracer.INSTANCE + ) + ) { + transport.start(); + final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); + + try (NioHttpClient client = NioHttpClient.https()) { + NioSocketChannel channel = null; + try { + CountDownLatch channelClosedLatch = new CountDownLatch(1); + channel = client.connect(remoteAddress.address()); + channel.addCloseListener((r, t) -> channelClosedLatch.countDown()); + assertTrue("Channel should be closed due to read timeout", channelClosedLatch.await(1, TimeUnit.MINUTES)); + } finally { + if (channel != null) { + channel.close(); + } + } + } + } + } + + private Settings createSettings() { + return createBuilderWithPort().build(); + } + + private Settings.Builder createBuilderWithPort() { + return Settings.builder().put(HttpTransportSettings.SETTING_HTTP_PORT.getKey(), getPortRange()); + } +} diff --git 
a/plugins/transport-nio/src/test/resources/README.txt b/plugins/transport-nio/src/test/resources/README.txt new file mode 100644 index 0000000000000..a4353cee45a97 --- /dev/null +++ b/plugins/transport-nio/src/test/resources/README.txt @@ -0,0 +1,14 @@ +#!/usr/bin/env bash +# +# This is README describes how the certificates in this directory were created. +# This file can also be executed as a script +# + +# 1. Create certificate key + +openssl req -x509 -sha256 -newkey rsa:2048 -keyout certificate.key -out certificate.crt -days 1024 -nodes + +# 2. Export the certificate in pkcs12 format + +openssl pkcs12 -export -in certificate.crt -inkey certificate.key -out server.p12 -name netty4-secure -password pass:password + diff --git a/plugins/transport-nio/src/test/resources/certificate.crt b/plugins/transport-nio/src/test/resources/certificate.crt new file mode 100644 index 0000000000000..54c78fdbcf6de --- /dev/null +++ b/plugins/transport-nio/src/test/resources/certificate.crt @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDkzCCAnugAwIBAgIUddAawr5zygcd+Dcn9WVDpO4BJ7YwDQYJKoZIhvcNAQEL +BQAwWTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM +GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDESMBAGA1UEAwwJbG9jYWxob3N0MB4X +DTI0MDMxNDE5NDQzOVoXDTI3MDEwMjE5NDQzOVowWTELMAkGA1UEBhMCQVUxEzAR +BgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoMGEludGVybmV0IFdpZGdpdHMgUHR5 +IEx0ZDESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAzjOKkg6Iba5zfZ8b/RYw+PGmGEfbdGuuF10Wz4Jmx/Nk4VfDLxdh +TW8VllUL2JD7uPkjABj7pW3awAbvIJ+VGbKqfBr1Nsz0mPPzhT8cfuMH/FDZgQs3 +4HuqDKr0LfC1Kw5E3WF0GVMBDNu0U+nKoeqySeYjGdxDnd3W4cqK5AnUxL0RnIny +Bw7ZuhcU55XndH/Xauro/2EpvJduDsWMdqt7ZfIf1TOmaiQHK+82yb/drVaJbczK +uTpn1Kv2bnzkQEckgq+z1dLNOOyvP2xf+nsziw5ilJe92e5GJOUJYFAlEgUAGpfD +dv6j/gTRYvdJCJItOQEQtektNCAZsoc0wwIDAQABo1MwUTAdBgNVHQ4EFgQUzHts +wIt+zhB/R4U4Do2P6rr0YhkwHwYDVR0jBBgwFoAUzHtswIt+zhB/R4U4Do2P6rr0 +YhkwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAveh870jJX7vt +oLCrdugsyo79pR4f7Nr1kUy3jJrfoaoUmrjiiiHWgT22fGwp7j1GZF2mVfo8YVaK +63YNn5gB2NNZhguPOFC4AdvHRYOKRBOaOvWK8oq7BcJ//18JYI/pPnpgkYvJjqv4 +gFKaZX9qWtujHpAmKiVGs7pwYGNXfixPHRNV4owcfHMIH5dhbbqT49j94xVpjbXs +OymKtFl4kpCE/0LzKFrFcuu55Am1VLBHx2cPpHLOipgUcF5BHFlQ8AXiCMOwfPAw +d22mLB6Gt1oVEpyvQHYd3e04FetEXQ9E8T+NKWZx/8Ucf+IWBYmZBRxch6O83xgk +bAbGzqkbzQ== +-----END CERTIFICATE----- diff --git a/plugins/transport-nio/src/test/resources/certificate.key b/plugins/transport-nio/src/test/resources/certificate.key new file mode 100644 index 0000000000000..228350180935d --- /dev/null +++ b/plugins/transport-nio/src/test/resources/certificate.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDOM4qSDohtrnN9 +nxv9FjD48aYYR9t0a64XXRbPgmbH82ThV8MvF2FNbxWWVQvYkPu4+SMAGPulbdrA +Bu8gn5UZsqp8GvU2zPSY8/OFPxx+4wf8UNmBCzfge6oMqvQt8LUrDkTdYXQZUwEM +27RT6cqh6rJJ5iMZ3EOd3dbhyorkCdTEvRGcifIHDtm6FxTnled0f9dq6uj/YSm8 +l24OxYx2q3tl8h/VM6ZqJAcr7zbJv92tVoltzMq5OmfUq/ZufORARySCr7PV0s04 +7K8/bF/6ezOLDmKUl73Z7kYk5QlgUCUSBQAal8N2/qP+BNFi90kIki05ARC16S00 +IBmyhzTDAgMBAAECggEAVOdiElvLjyX6xeoC00YU6hxOIMdNtHU2HMamwtDV01UD +38mMQ9KjrQelYt4n34drLrHe2IZw75/5J4JzagJrmUY47psHBwaDXItuZRokeJaw +zhLYTEs7OcKRtV+a5WOspUrdzi33aQoFb67zZG3qkpsZyFXrdBV+/fy/Iv+MCvLH +xR0jQ5mzE3cw20R7S4nddChBA/y8oKGOo6QRf2SznC1jL/+yolHvJPEn1v8AUxYm +BMPHxj1O0c4M4IxnJQ3Y5Jy9OaFMyMsFlF1hVhc/3LDDxDyOuBsVsFDicojyrRea +GKngIke0yezy7Wo4NUcp8YQhafonpWVsSJJdOUotcQKBgQD0rihFBXVtcG1d/Vy7 +FvLHrmccD56JNV744LSn2CDM7W1IulNbDUZINdCFqL91u5LpxozeE1FPY1nhwncJ 
+N7V7XYCaSLCuV1YJzRmUCjnzk2RyopGpzWog3f9uUFGgrk1HGbNAv99k/REya6Iu +IRSkuQhaJOj3bRXzonh0K4GjewKBgQDXvamtCioOUMSP8vq919YMkBw7F+z/fr0p +pamO8HL9eewAUg6N92JQ9kobSo/GptdmdHIjs8LqnS5C3H13GX5Qlf5GskOlCpla +V55ElaSp0gvKwWE168U7gQH4etPQAXXJrOGFaGbPj9W81hTUud7HVE88KYdfWTBo +I7TuE25tWQKBgBRjcr2Vn9xXsvVTCGgamG5lLPhcoNREGz7X0pXt34XT/vhBdnKu +331i5pZMom+YCrzqK5DRwUPBPpseTjb5amj2OKIijn5ojqXQbmI0m/GdBZC71TF2 +CXLlrMQvcy3VeGEFVjd+BYpvwAAYkfIQFZ1IQdbpHnSHpX2guzLK8UmDAoGBANUy +PIcf0EetUVHfkCIjNQfdMcjD8BTcLhsF9vWmcDxFTA9VB8ULf0D64mjt2f85yQsa +b+EQN8KZ6alxMxuLOeRxFYLPj0F9o+Y/R8wHBV48kCKhz2r1v0b6SfQ/jSm1B61x +BrxLW64qOdIOzS8bLyhUDKkrcPesr8V548aRtUKhAoGBAKlNJFd8BCGKD9Td+3dE +oP1iHTX5XZ+cQIqL0e+GMQlK4HnQP566DFZU5/GHNNAfmyxd5iSRwhTqPMHRAmOb +pqQwsyufx0dFeIBxeSO3Z6jW5h2sl4nBipZpw9bzv6EBL1xRr0SfMNZzdnf4JFzc +0htGo/VO93Z2pv8w7uGUz1nN +-----END PRIVATE KEY----- diff --git a/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorHttpClient.java b/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorHttpClient.java index 0953e51484bd3..8d20650d76583 100644 --- a/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorHttpClient.java +++ b/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorHttpClient.java @@ -181,7 +181,7 @@ private List processRequestsWithBody( private List sendRequests( final InetSocketAddress remoteAddress, final Collection requests, - boolean orderer + boolean ordered ) { final NioEventLoopGroup eventLoopGroup = new NioEventLoopGroup(1); try { @@ -209,7 +209,7 @@ private List sendRequests( ) .toArray(Mono[]::new); - if (orderer == false) { + if (ordered == false) { return ParallelFlux.from(monos).sequential().collectList().block(); } else { return Flux.concat(monos).flatMapSequential(r -> Mono.just(r)).collectList().block(); diff --git a/server/src/main/java/org/opensearch/plugins/SecureHttpTransportSettingsProvider.java b/server/src/main/java/org/opensearch/plugins/SecureHttpTransportSettingsProvider.java index ff86cbc04e240..b7a47b0f4c742 100644 --- a/server/src/main/java/org/opensearch/plugins/SecureHttpTransportSettingsProvider.java +++ b/server/src/main/java/org/opensearch/plugins/SecureHttpTransportSettingsProvider.java @@ -27,6 +27,16 @@ */ @ExperimentalApi public interface SecureHttpTransportSettingsProvider { + /** + * The well-known name of header verifier {@link TransportAdapterProvider} provider instance + */ + final String REQUEST_HEADER_VERIFIER = "HeaderVerifier"; + + /** + * The well-known name of request decompressor {@link TransportAdapterProvider} provider instance + */ + final String REQUEST_DECOMPRESSOR = "RequestDecompressor"; + /** * Collection of additional {@link TransportAdapterProvider}s that are specific to particular HTTP transport * @param settings settings From e07499a771afbc335e1f7f08a82f8197e5826939 Mon Sep 17 00:00:00 2001 From: Robson Araujo Date: Tue, 5 Nov 2024 11:08:13 -0800 Subject: [PATCH 11/30] Improve performance for resolving derived fields (#16564) Doing the type check before the string comparison makes it much faster to resolve derived fields. 
Signed-off-by: Robson Araujo --- .../opensearch/index/mapper/DefaultDerivedFieldResolver.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/main/java/org/opensearch/index/mapper/DefaultDerivedFieldResolver.java b/server/src/main/java/org/opensearch/index/mapper/DefaultDerivedFieldResolver.java index 4dd17703b6f55..1e8ef4134a8e7 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DefaultDerivedFieldResolver.java +++ b/server/src/main/java/org/opensearch/index/mapper/DefaultDerivedFieldResolver.java @@ -72,7 +72,7 @@ public Set resolvePattern(String pattern) { Set derivedFields = new HashSet<>(); if (queryShardContext != null && queryShardContext.getMapperService() != null) { for (MappedFieldType fieldType : queryShardContext.getMapperService().fieldTypes()) { - if (Regex.simpleMatch(pattern, fieldType.name()) && fieldType instanceof DerivedFieldType) { + if (fieldType instanceof DerivedFieldType && Regex.simpleMatch(pattern, fieldType.name())) { derivedFields.add(fieldType.name()); } } From 4213cc27305c37ea71e5b5a5addd17e5383e8029 Mon Sep 17 00:00:00 2001 From: Finn Date: Tue, 5 Nov 2024 13:02:07 -0800 Subject: [PATCH 12/30] Make cacheEntry.getIndexInput() privileged when fetching blobs from remote snapshot (#16544) * Make cacheEntry.getIndexInput() privileged when fetching blobs from remote store Signed-off-by: Finn Carroll * Rebase Signed-off-by: Finn Carroll * Spotless apply Signed-off-by: Finn Carroll * Clean up doPrivileged calls Signed-off-by: Finn Carroll * Comment Signed-off-by: Finn Carroll * Move fetchBlob to PrivilegedExceptionAction. Catch and unwrap IOException. Signed-off-by: Finn Carroll * Unused import Signed-off-by: Finn Carroll * Update server/src/main/java/org/opensearch/index/store/remote/utils/TransferManager.java Co-authored-by: Andriy Redko Signed-off-by: Finn * Typo 'thrown'. Catch and throw unknown exception as IOException. 
Signed-off-by: Finn Carroll --------- Signed-off-by: Finn Carroll Signed-off-by: Finn Co-authored-by: Andriy Redko --- CHANGELOG.md | 1 + .../store/remote/utils/TransferManager.java | 64 +++++++++++-------- 2 files changed, 40 insertions(+), 25 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index bba62e97a49e0..b94483c42c6f4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -32,6 +32,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Remove resource usages object from search response headers ([#16532](https://github.com/opensearch-project/OpenSearch/pull/16532)) - Support retrieving doc values of unsigned long field ([#16543](https://github.com/opensearch-project/OpenSearch/pull/16543)) - Fix rollover alias supports restored searchable snapshot index([#16483](https://github.com/opensearch-project/OpenSearch/pull/16483)) +- Fix permissions error on scripted query against remote snapshot ([#16544](https://github.com/opensearch-project/OpenSearch/pull/16544)) ### Security diff --git a/server/src/main/java/org/opensearch/index/store/remote/utils/TransferManager.java b/server/src/main/java/org/opensearch/index/store/remote/utils/TransferManager.java index 94c25202ac90c..77a8ccfafbac2 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/utils/TransferManager.java +++ b/server/src/main/java/org/opensearch/index/store/remote/utils/TransferManager.java @@ -24,7 +24,8 @@ import java.nio.file.Files; import java.nio.file.Path; import java.security.AccessController; -import java.security.PrivilegedAction; +import java.security.PrivilegedActionException; +import java.security.PrivilegedExceptionAction; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionException; import java.util.concurrent.atomic.AtomicBoolean; @@ -56,39 +57,52 @@ public TransferManager(final StreamReader streamReader, final FileCache fileCach /** * Given a blobFetchRequestList, return it's corresponding IndexInput. + * + * Note: Scripted queries/aggs may trigger a blob fetch within a new security context. + * As such the following operations require elevated permissions. + * + * cacheEntry.getIndexInput() downloads new blobs from the remote store to local fileCache. + * fileCache.compute() as inserting into the local fileCache may trigger an eviction. + * * @param blobFetchRequest to fetch * @return future of IndexInput augmented with internal caching maintenance tasks */ public IndexInput fetchBlob(BlobFetchRequest blobFetchRequest) throws IOException { - final Path key = blobFetchRequest.getFilePath(); logger.trace("fetchBlob called for {}", key.toString()); - // We need to do a privileged action here in order to fetch from remote - // and write/evict from local file cache in case this is invoked as a side - // effect of a plugin (such as a scripted search) that doesn't have the - // necessary permissions. 
- final CachedIndexInput cacheEntry = AccessController.doPrivileged((PrivilegedAction) () -> { - return fileCache.compute(key, (path, cachedIndexInput) -> { - if (cachedIndexInput == null || cachedIndexInput.isClosed()) { - logger.trace("Transfer Manager - IndexInput closed or not in cache"); - // Doesn't exist or is closed, either way create a new one - return new DelayedCreationCachedIndexInput(fileCache, streamReader, blobFetchRequest); - } else { - logger.trace("Transfer Manager - Already in cache"); - // already in the cache and ready to be used (open) - return cachedIndexInput; + try { + return AccessController.doPrivileged((PrivilegedExceptionAction) () -> { + CachedIndexInput cacheEntry = fileCache.compute(key, (path, cachedIndexInput) -> { + if (cachedIndexInput == null || cachedIndexInput.isClosed()) { + logger.trace("Transfer Manager - IndexInput closed or not in cache"); + // Doesn't exist or is closed, either way create a new one + return new DelayedCreationCachedIndexInput(fileCache, streamReader, blobFetchRequest); + } else { + logger.trace("Transfer Manager - Already in cache"); + // already in the cache and ready to be used (open) + return cachedIndexInput; + } + }); + + // Cache entry was either retrieved from the cache or newly added, either + // way the reference count has been incremented by one. We can only + // decrement this reference _after_ creating the clone to be returned. + try { + return cacheEntry.getIndexInput().clone(); + } finally { + fileCache.decRef(key); } }); - }); - - // Cache entry was either retrieved from the cache or newly added, either - // way the reference count has been incremented by one. We can only - // decrement this reference _after_ creating the clone to be returned. - try { - return cacheEntry.getIndexInput().clone(); - } finally { - fileCache.decRef(key); + } catch (PrivilegedActionException e) { + final Exception cause = e.getException(); + if (cause instanceof IOException) { + throw (IOException) cause; + } else if (cause instanceof RuntimeException) { + throw (RuntimeException) cause; + } else { + throw new IOException(cause); + } } } From 034bd2b6483c180b4a4439d62452cc50198c37fb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 6 Nov 2024 16:02:00 +0800 Subject: [PATCH 13/30] Bump com.google.apis:google-api-services-compute from v1-rev20241015-2.0.0 to v1-rev20241021-2.0.0 in /plugins/discovery-gce (#16548) * Bump com.google.apis:google-api-services-compute Bumps com.google.apis:google-api-services-compute from v1-rev20241015-2.0.0 to v1-rev20241021-2.0.0. --- updated-dependencies: - dependency-name: com.google.apis:google-api-services-compute dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 2 +- plugins/discovery-gce/build.gradle | 2 +- .../google-api-services-compute-v1-rev20241015-2.0.0.jar.sha1 | 1 - .../google-api-services-compute-v1-rev20241021-2.0.0.jar.sha1 | 1 + 4 files changed, 3 insertions(+), 3 deletions(-) delete mode 100644 plugins/discovery-gce/licenses/google-api-services-compute-v1-rev20241015-2.0.0.jar.sha1 create mode 100644 plugins/discovery-gce/licenses/google-api-services-compute-v1-rev20241021-2.0.0.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index b94483c42c6f4..6d9cf50d0da5a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,7 +15,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Dependencies - Bump `com.azure:azure-storage-common` from 12.25.1 to 12.27.1 ([#16521](https://github.com/opensearch-project/OpenSearch/pull/16521)) -- Bump `com.google.apis:google-api-services-compute` from v1-rev20240407-2.0.0 to v1-rev20241015-2.0.0 ([#16502](https://github.com/opensearch-project/OpenSearch/pull/16502)) +- Bump `com.google.apis:google-api-services-compute` from v1-rev20240407-2.0.0 to v1-rev20241021-2.0.0 ([#16502](https://github.com/opensearch-project/OpenSearch/pull/16502), [#16548](https://github.com/opensearch-project/OpenSearch/pull/16548)) - Bump `com.azure:azure-storage-blob` from 12.23.0 to 12.28.1 ([#16501](https://github.com/opensearch-project/OpenSearch/pull/16501)) - Bump `org.apache.hadoop:hadoop-minicluster` from 3.4.0 to 3.4.1 ([#16550](https://github.com/opensearch-project/OpenSearch/pull/16550)) diff --git a/plugins/discovery-gce/build.gradle b/plugins/discovery-gce/build.gradle index 4e05544a33f1d..5f4670357f927 100644 --- a/plugins/discovery-gce/build.gradle +++ b/plugins/discovery-gce/build.gradle @@ -18,7 +18,7 @@ opensearchplugin { } dependencies { - api "com.google.apis:google-api-services-compute:v1-rev20241015-2.0.0" + api "com.google.apis:google-api-services-compute:v1-rev20241021-2.0.0" api "com.google.api-client:google-api-client:1.35.2" api "com.google.oauth-client:google-oauth-client:1.36.0" api "com.google.http-client:google-http-client:${versions.google_http_client}" diff --git a/plugins/discovery-gce/licenses/google-api-services-compute-v1-rev20241015-2.0.0.jar.sha1 b/plugins/discovery-gce/licenses/google-api-services-compute-v1-rev20241015-2.0.0.jar.sha1 deleted file mode 100644 index 1de9a570242fd..0000000000000 --- a/plugins/discovery-gce/licenses/google-api-services-compute-v1-rev20241015-2.0.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -83d293916d59ced480e48fd8c0aefb643e27566c \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/google-api-services-compute-v1-rev20241021-2.0.0.jar.sha1 b/plugins/discovery-gce/licenses/google-api-services-compute-v1-rev20241021-2.0.0.jar.sha1 new file mode 100644 index 0000000000000..309d10035f35a --- /dev/null +++ b/plugins/discovery-gce/licenses/google-api-services-compute-v1-rev20241021-2.0.0.jar.sha1 @@ -0,0 +1 @@ +cc3bd864ec5ac819699ea24a64109bfda42cb55c \ No newline at end of file From 9f790ee1e89063fd38501e64faf1df7109b3f4ec Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Wed, 6 Nov 2024 11:48:05 -0800 Subject: [PATCH 14/30] Fix non-x64 bwc build targets (#16575) There were a few issues here: the '-x64' suffix was being 
unconditionally appended, debian uses underscores not hyphens, and the rpm target uses the '.x86_64' suffix. Signed-off-by: Andrew Ross --- .../InternalDistributionBwcSetupPlugin.java | 24 +++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionBwcSetupPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionBwcSetupPlugin.java index 0502280cb69ad..846c7e0d46b70 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionBwcSetupPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionBwcSetupPlugin.java @@ -181,15 +181,19 @@ private static List resolveArchiveProjects(File checkoutDir if (name.contains("zip") || name.contains("tar")) { int index = name.lastIndexOf('-'); String baseName = name.substring(0, index); - classifier = "-" + baseName + "-x64"; + classifier = "-" + baseName; + // The x64 variants do not have the architecture built into the task name, so it needs to be appended + if (name.equals("darwin-tar") || name.equals("linux-tar") || name.equals("windows-zip")) { + classifier += "-x64"; + } extension = name.substring(index + 1); if (extension.equals("tar")) { extension += ".gz"; } } else if (name.contains("deb")) { - classifier = "-amd64"; + classifier = "_amd64"; } else if (name.contains("rpm")) { - classifier = "-x64"; + classifier = ".x86_64"; } } else { extension = name.substring(4); @@ -256,9 +260,21 @@ private static class DistributionProject { this.name = name; this.projectPath = baseDir + "/" + name; if (version.onOrAfter("1.1.0")) { + // Deb uses underscores (I don't know why...): + // https://github.com/opensearch-project/OpenSearch/blob/f6d9a86f0e2e8241fd58b7e8b6cdeaf931b5108f/distribution/packages/build.gradle#L139 + final String separator = name.equals("deb") ? "_" : "-"; this.distFile = new File( checkoutDir, - baseDir + "/" + name + "/build/distributions/opensearch-min-" + version + "-SNAPSHOT" + classifier + "." + extension + baseDir + + "/" + + name + + "/build/distributions/opensearch-min" + + separator + + version + + "-SNAPSHOT" + + classifier + + "."
+ + extension ); } else { this.distFile = new File( From aa5c39bbbce5bfcb06f4892ff5d6ccaea79126b2 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Thu, 7 Nov 2024 10:44:40 -0500 Subject: [PATCH 15/30] Detect Breaking Changes check does not fail on new method added to an @PublicApi interface (#16585) Signed-off-by: Andriy Redko --- server/build.gradle | 1 + 1 file changed, 1 insertion(+) diff --git a/server/build.gradle b/server/build.gradle index c19e171c90f96..d3c7d4089125c 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -414,6 +414,7 @@ tasks.register("japicmp", me.champeau.gradle.japicmp.JapicmpTask) { onlyModified = true failOnModification = true ignoreMissingClasses = true + failOnSourceIncompatibility = true annotationIncludes = ['@org.opensearch.common.annotation.PublicApi', '@org.opensearch.common.annotation.DeprecatedApi'] annotationExcludes = ['@org.opensearch.common.annotation.InternalApi'] txtOutputFile = layout.buildDirectory.file("reports/java-compatibility/report.txt") From 9b7681c1e56db5dd61787bd6f1ff9015781a8717 Mon Sep 17 00:00:00 2001 From: Jay Deng Date: Thu, 7 Nov 2024 10:29:42 -0800 Subject: [PATCH 16/30] Make IndexStoreListener a pluggable interface (#16583) Signed-off-by: Jay Deng --- CHANGELOG.md | 3 +- .../org/opensearch/env/NodeEnvironment.java | 15 +--- .../index/store/IndexStoreListener.java | 73 +++++++++++++++++++ .../remote/filecache/FileCacheCleaner.java | 3 +- .../main/java/org/opensearch/node/Node.java | 22 +++++- .../opensearch/plugins/IndexStorePlugin.java | 9 +++ .../opensearch/env/NodeEnvironmentTests.java | 42 ++++++++--- 7 files changed, 138 insertions(+), 29 deletions(-) create mode 100644 server/src/main/java/org/opensearch/index/store/IndexStoreListener.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 6d9cf50d0da5a..60535b2cca895 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,7 +11,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add logic in master service to optimize performance and retain detailed logging for critical cluster operations. 
([#14795](https://github.com/opensearch-project/OpenSearch/pull/14795)) - Add Setting to adjust the primary constraint weights ([#16471](https://github.com/opensearch-project/OpenSearch/pull/16471)) - Switch from `buildSrc/version.properties` to Gradle version catalog (`gradle/libs.versions.toml`) to enable dependabot to perform automated upgrades on common libs ([#16284](https://github.com/opensearch-project/OpenSearch/pull/16284)) -- Add dynamic setting allowing size > 0 requests to be cached in the request cache ([#16483](https://github.com/opensearch-project/OpenSearch/pull/16483/files)) +- Add dynamic setting allowing size > 0 requests to be cached in the request cache ([#16483](https://github.com/opensearch-project/OpenSearch/pull/16483)) +- Make IndexStoreListener a pluggable interface ([#16583](https://github.com/opensearch-project/OpenSearch/pull/16583)) ### Dependencies - Bump `com.azure:azure-storage-common` from 12.25.1 to 12.27.1 ([#16521](https://github.com/opensearch-project/OpenSearch/pull/16521)) diff --git a/server/src/main/java/org/opensearch/env/NodeEnvironment.java b/server/src/main/java/org/opensearch/env/NodeEnvironment.java index 709c0eba4f57f..5c6e44d063dd7 100644 --- a/server/src/main/java/org/opensearch/env/NodeEnvironment.java +++ b/server/src/main/java/org/opensearch/env/NodeEnvironment.java @@ -71,6 +71,7 @@ import org.opensearch.index.IndexSettings; import org.opensearch.index.shard.ShardPath; import org.opensearch.index.store.FsDirectoryFactory; +import org.opensearch.index.store.IndexStoreListener; import org.opensearch.monitor.fs.FsInfo; import org.opensearch.monitor.fs.FsProbe; import org.opensearch.monitor.jvm.JvmInfo; @@ -1412,18 +1413,4 @@ private static void tryWriteTempFile(Path path) throws IOException { } } } - - /** - * A listener that is executed on per-index and per-shard store events, like deleting shard path - * - * @opensearch.internal - */ - public interface IndexStoreListener { - default void beforeShardPathDeleted(ShardId shardId, IndexSettings indexSettings, NodeEnvironment env) {} - - default void beforeIndexPathDeleted(Index index, IndexSettings indexSettings, NodeEnvironment env) {} - - IndexStoreListener EMPTY = new IndexStoreListener() { - }; - } } diff --git a/server/src/main/java/org/opensearch/index/store/IndexStoreListener.java b/server/src/main/java/org/opensearch/index/store/IndexStoreListener.java new file mode 100644 index 0000000000000..5a8dd28d43bbc --- /dev/null +++ b/server/src/main/java/org/opensearch/index/store/IndexStoreListener.java @@ -0,0 +1,73 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.store; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.env.NodeEnvironment; +import org.opensearch.index.IndexSettings; + +import java.util.Collections; +import java.util.List; + +/** + * A listener that is executed on per-index and per-shard store events, like deleting shard path + * + * @opensearch.api + */ +@PublicApi(since = "2.19.0") +public interface IndexStoreListener { + default void beforeShardPathDeleted(ShardId shardId, IndexSettings indexSettings, NodeEnvironment env) {} + + default void beforeIndexPathDeleted(Index index, IndexSettings indexSettings, NodeEnvironment env) {} + + IndexStoreListener EMPTY = new IndexStoreListener() { + }; + + /** + * A Composite listener that multiplexes calls to each of the listeners methods. + * + * @opensearch.api + */ + @PublicApi(since = "2.19.0") + final class CompositeIndexStoreListener implements IndexStoreListener { + private final List listeners; + private final static Logger logger = LogManager.getLogger(CompositeIndexStoreListener.class); + + public CompositeIndexStoreListener(List listeners) { + this.listeners = Collections.unmodifiableList(listeners); + } + + @Override + public void beforeShardPathDeleted(ShardId shardId, IndexSettings indexSettings, NodeEnvironment env) { + for (IndexStoreListener listener : listeners) { + try { + listener.beforeShardPathDeleted(shardId, indexSettings, env); + } catch (Exception e) { + logger.warn(() -> new ParameterizedMessage("beforeShardPathDeleted listener [{}] failed", listener), e); + } + } + } + + @Override + public void beforeIndexPathDeleted(Index index, IndexSettings indexSettings, NodeEnvironment env) { + for (IndexStoreListener listener : listeners) { + try { + listener.beforeIndexPathDeleted(index, indexSettings, env); + } catch (Exception e) { + logger.warn(() -> new ParameterizedMessage("beforeIndexPathDeleted listener [{}] failed", listener), e); + } + } + } + } +} diff --git a/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheCleaner.java b/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheCleaner.java index 0261ab24dfa7a..3cdd41b94a5e9 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheCleaner.java +++ b/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheCleaner.java @@ -18,6 +18,7 @@ import org.opensearch.env.NodeEnvironment; import org.opensearch.index.IndexSettings; import org.opensearch.index.shard.ShardPath; +import org.opensearch.index.store.IndexStoreListener; import java.io.IOException; import java.nio.file.DirectoryStream; @@ -33,7 +34,7 @@ * * @opensearch.internal */ -public class FileCacheCleaner implements NodeEnvironment.IndexStoreListener { +public class FileCacheCleaner implements IndexStoreListener { private static final Logger logger = LogManager.getLogger(FileCacheCleaner.class); private final Provider fileCacheProvider; diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index e74fca60b0201..c78ee6711dcda 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -157,6 +157,7 @@ import 
org.opensearch.index.recovery.RemoteStoreRestoreService; import org.opensearch.index.remote.RemoteIndexPathUploader; import org.opensearch.index.remote.RemoteStoreStatsTrackerFactory; +import org.opensearch.index.store.IndexStoreListener; import org.opensearch.index.store.RemoteSegmentStoreDirectoryFactory; import org.opensearch.index.store.remote.filecache.FileCache; import org.opensearch.index.store.remote.filecache.FileCacheCleaner; @@ -548,10 +549,27 @@ protected Node( */ this.environment = new Environment(settings, initialEnvironment.configDir(), Node.NODE_LOCAL_STORAGE_SETTING.get(settings)); Environment.assertEquivalent(initialEnvironment, this.environment); + Stream indexStoreListenerStream = pluginsService.filterPlugins(IndexStorePlugin.class) + .stream() + .map(IndexStorePlugin::getIndexStoreListener) + .filter(Optional::isPresent) + .map(Optional::get); + // FileCache is only initialized on search nodes, so we only create FileCacheCleaner on search nodes as well if (DiscoveryNode.isSearchNode(settings) == false) { - nodeEnvironment = new NodeEnvironment(tmpSettings, environment); + nodeEnvironment = new NodeEnvironment( + settings, + environment, + new IndexStoreListener.CompositeIndexStoreListener(indexStoreListenerStream.collect(Collectors.toList())) + ); } else { - nodeEnvironment = new NodeEnvironment(settings, environment, new FileCacheCleaner(this::fileCache)); + nodeEnvironment = new NodeEnvironment( + settings, + environment, + new IndexStoreListener.CompositeIndexStoreListener( + Stream.concat(indexStoreListenerStream, Stream.of(new FileCacheCleaner(this::fileCache))) + .collect(Collectors.toList()) + ) + ); } logger.info( "node name [{}], node ID [{}], cluster name [{}], roles {}", diff --git a/server/src/main/java/org/opensearch/plugins/IndexStorePlugin.java b/server/src/main/java/org/opensearch/plugins/IndexStorePlugin.java index ebd5717a00319..f0df8a122ed7d 100644 --- a/server/src/main/java/org/opensearch/plugins/IndexStorePlugin.java +++ b/server/src/main/java/org/opensearch/plugins/IndexStorePlugin.java @@ -39,11 +39,13 @@ import org.opensearch.common.annotation.PublicApi; import org.opensearch.index.IndexSettings; import org.opensearch.index.shard.ShardPath; +import org.opensearch.index.store.IndexStoreListener; import org.opensearch.indices.recovery.RecoveryState; import java.io.IOException; import java.util.Collections; import java.util.Map; +import java.util.Optional; /** * A plugin that provides alternative directory implementations. 
@@ -105,4 +107,11 @@ interface RecoveryStateFactory { default Map getRecoveryStateFactories() { return Collections.emptyMap(); } + + /** + * The {@link IndexStoreListener}s for this plugin which are triggered upon shard/index path deletion + */ + default Optional getIndexStoreListener() { + return Optional.empty(); + } } diff --git a/server/src/test/java/org/opensearch/env/NodeEnvironmentTests.java b/server/src/test/java/org/opensearch/env/NodeEnvironmentTests.java index 962eb743dca6e..3ee9e859c198f 100644 --- a/server/src/test/java/org/opensearch/env/NodeEnvironmentTests.java +++ b/server/src/test/java/org/opensearch/env/NodeEnvironmentTests.java @@ -45,6 +45,7 @@ import org.opensearch.core.index.shard.ShardId; import org.opensearch.gateway.MetadataStateFormat; import org.opensearch.index.IndexSettings; +import org.opensearch.index.store.IndexStoreListener; import org.opensearch.node.Node; import org.opensearch.test.IndexSettingsModule; import org.opensearch.test.NodeRoles; @@ -360,24 +361,39 @@ protected void doRun() throws Exception { } public void testIndexStoreListener() throws Exception { - final AtomicInteger shardCounter = new AtomicInteger(0); - final AtomicInteger indexCounter = new AtomicInteger(0); + final AtomicInteger shardCounter1 = new AtomicInteger(0); + final AtomicInteger shardCounter2 = new AtomicInteger(0); + final AtomicInteger indexCounter1 = new AtomicInteger(0); + final AtomicInteger indexCounter2 = new AtomicInteger(0); final Index index = new Index("foo", "fooUUID"); final ShardId shardId = new ShardId(index, 0); - final NodeEnvironment.IndexStoreListener listener = new NodeEnvironment.IndexStoreListener() { + final IndexStoreListener listener1 = new IndexStoreListener() { @Override public void beforeShardPathDeleted(ShardId inShardId, IndexSettings indexSettings, NodeEnvironment env) { assertEquals(shardId, inShardId); - shardCounter.incrementAndGet(); + shardCounter1.incrementAndGet(); } @Override public void beforeIndexPathDeleted(Index inIndex, IndexSettings indexSettings, NodeEnvironment env) { assertEquals(index, inIndex); - indexCounter.incrementAndGet(); + indexCounter1.incrementAndGet(); } }; - final NodeEnvironment env = newNodeEnvironment(listener); + final IndexStoreListener listener2 = new IndexStoreListener() { + @Override + public void beforeShardPathDeleted(ShardId inShardId, IndexSettings indexSettings, NodeEnvironment env) { + assertEquals(shardId, inShardId); + shardCounter2.incrementAndGet(); + } + + @Override + public void beforeIndexPathDeleted(Index inIndex, IndexSettings indexSettings, NodeEnvironment env) { + assertEquals(index, inIndex); + indexCounter2.incrementAndGet(); + } + }; + final NodeEnvironment env = newNodeEnvironment(new IndexStoreListener.CompositeIndexStoreListener(List.of(listener1, listener2))); for (Path path : env.indexPaths(index)) { Files.createDirectories(path.resolve("0")); @@ -386,26 +402,30 @@ public void beforeIndexPathDeleted(Index inIndex, IndexSettings indexSettings, N for (Path path : env.indexPaths(index)) { assertTrue(Files.exists(path.resolve("0"))); } - assertEquals(0, shardCounter.get()); + assertEquals(0, shardCounter1.get()); + assertEquals(0, shardCounter2.get()); env.deleteShardDirectorySafe(new ShardId(index, 0), idxSettings); for (Path path : env.indexPaths(index)) { assertFalse(Files.exists(path.resolve("0"))); } - assertEquals(1, shardCounter.get()); + assertEquals(1, shardCounter1.get()); + assertEquals(1, shardCounter2.get()); for (Path path : env.indexPaths(index)) { 
assertTrue(Files.exists(path)); } - assertEquals(0, indexCounter.get()); + assertEquals(0, indexCounter1.get()); + assertEquals(0, indexCounter2.get()); env.deleteIndexDirectorySafe(index, 5000, idxSettings); for (Path path : env.indexPaths(index)) { assertFalse(Files.exists(path)); } - assertEquals(1, indexCounter.get()); + assertEquals(1, indexCounter1.get()); + assertEquals(1, indexCounter2.get()); assertTrue("LockedShards: " + env.lockedShards(), env.lockedShards().isEmpty()); env.close(); } @@ -680,7 +700,7 @@ public NodeEnvironment newNodeEnvironment() throws IOException { return newNodeEnvironment(Settings.EMPTY); } - public NodeEnvironment newNodeEnvironment(NodeEnvironment.IndexStoreListener listener) throws IOException { + public NodeEnvironment newNodeEnvironment(IndexStoreListener listener) throws IOException { Settings build = buildEnvSettings(Settings.EMPTY); return new NodeEnvironment(build, TestEnvironment.newEnvironment(build), listener); } From 5909e1ad30ae00476d121536a5cb415eafc15a9e Mon Sep 17 00:00:00 2001 From: "mend-for-github-com[bot]" <50673670+mend-for-github-com[bot]@users.noreply.github.com> Date: Thu, 7 Nov 2024 14:56:58 -0500 Subject: [PATCH 17/30] Update dependency org.apache.zookeeper:zookeeper to v3.9.3 (#16593) Co-authored-by: mend-for-github-com[bot] <50673670+mend-for-github-com[bot]@users.noreply.github.com> --- test/fixtures/hdfs-fixture/build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index 18bcee8b338fc..8a402879970d7 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -71,7 +71,7 @@ dependencies { api "org.jetbrains.kotlin:kotlin-stdlib:${versions.kotlin}" api "org.eclipse.jetty:jetty-server:${versions.jetty}" api "org.eclipse.jetty.websocket:javax-websocket-server-impl:${versions.jetty}" - api 'org.apache.zookeeper:zookeeper:3.9.2' + api 'org.apache.zookeeper:zookeeper:3.9.3' api "org.apache.commons:commons-text:1.12.0" api "commons-net:commons-net:3.11.1" api "ch.qos.logback:logback-core:1.5.12" From e68838819710d7040cf2b591590285f1b86f0da0 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Fri, 8 Nov 2024 10:42:53 -0500 Subject: [PATCH 18/30] [AUTO] [main] Add bwc version 2.18.1. 
(#16573) * Add bwc version 2.18.1 Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> * Update Version.java Signed-off-by: Andriy Redko --------- Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Signed-off-by: Andriy Redko Co-authored-by: opensearch-ci-bot <83309141+opensearch-ci-bot@users.noreply.github.com> Co-authored-by: Andriy Redko --- .ci/bwcVersions | 1 + libs/core/src/main/java/org/opensearch/Version.java | 1 + 2 files changed, 2 insertions(+) diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 17bb5a7df9b21..d1b4e4c509cb9 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -41,4 +41,5 @@ BWC_VERSION: - "2.17.1" - "2.17.2" - "2.18.0" + - "2.18.1" - "2.19.0" \ No newline at end of file diff --git a/libs/core/src/main/java/org/opensearch/Version.java b/libs/core/src/main/java/org/opensearch/Version.java index 4d685e3bc654a..ec0a18dbbf882 100644 --- a/libs/core/src/main/java/org/opensearch/Version.java +++ b/libs/core/src/main/java/org/opensearch/Version.java @@ -112,6 +112,7 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_2_17_1 = new Version(2170199, org.apache.lucene.util.Version.LUCENE_9_11_1); public static final Version V_2_17_2 = new Version(2170299, org.apache.lucene.util.Version.LUCENE_9_11_1); public static final Version V_2_18_0 = new Version(2180099, org.apache.lucene.util.Version.LUCENE_9_12_0); + public static final Version V_2_18_1 = new Version(2180199, org.apache.lucene.util.Version.LUCENE_9_12_0); public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_12_0); public static final Version V_2_19_0 = new Version(2190099, org.apache.lucene.util.Version.LUCENE_9_12_0); public static final Version CURRENT = V_3_0_0; From 10873f16e43780dbac4bf879e3324285461581cc Mon Sep 17 00:00:00 2001 From: Gaurav Bafna <85113518+gbbafna@users.noreply.github.com> Date: Mon, 11 Nov 2024 16:45:43 +0530 Subject: [PATCH 19/30] Increase segrep pressure checkpoint default limit to 30 (#16577) Signed-off-by: Gaurav Bafna --- CHANGELOG.md | 1 + .../opensearch/index/SegmentReplicationPressureService.java | 2 +- .../index/SegmentReplicationPressureServiceTests.java | 3 +++ 3 files changed, 5 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 60535b2cca895..a0529d8fa6b63 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add logic in master service to optimize performance and retain detailed logging for critical cluster operations.
([#14795](https://github.com/opensearch-project/OpenSearch/pull/14795)) - Add Setting to adjust the primary constraint weights ([#16471](https://github.com/opensearch-project/OpenSearch/pull/16471)) - Switch from `buildSrc/version.properties` to Gradle version catalog (`gradle/libs.versions.toml`) to enable dependabot to perform automated upgrades on common libs ([#16284](https://github.com/opensearch-project/OpenSearch/pull/16284)) +- Increase segrep pressure checkpoint default limit to 30 ([#16577](https://github.com/opensearch-project/OpenSearch/pull/16577/files)) - Add dynamic setting allowing size > 0 requests to be cached in the request cache ([#16483](https://github.com/opensearch-project/OpenSearch/pull/16483)) - Make IndexStoreListener a pluggable interface ([#16583](https://github.com/opensearch-project/OpenSearch/pull/16583)) diff --git a/server/src/main/java/org/opensearch/index/SegmentReplicationPressureService.java b/server/src/main/java/org/opensearch/index/SegmentReplicationPressureService.java index 297fe093f7f4e..03b162a9c1755 100644 --- a/server/src/main/java/org/opensearch/index/SegmentReplicationPressureService.java +++ b/server/src/main/java/org/opensearch/index/SegmentReplicationPressureService.java @@ -60,7 +60,7 @@ public class SegmentReplicationPressureService implements Closeable { public static final Setting MAX_INDEXING_CHECKPOINTS = Setting.intSetting( "segrep.pressure.checkpoint.limit", - 4, + 30, 1, Setting.Property.Dynamic, Setting.Property.NodeScope diff --git a/server/src/test/java/org/opensearch/index/SegmentReplicationPressureServiceTests.java b/server/src/test/java/org/opensearch/index/SegmentReplicationPressureServiceTests.java index a9725f638cc53..166c0e16bfe8b 100644 --- a/server/src/test/java/org/opensearch/index/SegmentReplicationPressureServiceTests.java +++ b/server/src/test/java/org/opensearch/index/SegmentReplicationPressureServiceTests.java @@ -33,6 +33,7 @@ import org.mockito.stubbing.Answer; import static java.util.Arrays.asList; +import static org.opensearch.index.SegmentReplicationPressureService.MAX_INDEXING_CHECKPOINTS; import static org.opensearch.index.SegmentReplicationPressureService.MAX_REPLICATION_LIMIT_STALE_REPLICA_SETTING; import static org.opensearch.index.SegmentReplicationPressureService.MAX_REPLICATION_TIME_BACKPRESSURE_SETTING; import static org.opensearch.index.SegmentReplicationPressureService.SEGMENT_REPLICATION_INDEXING_PRESSURE_ENABLED; @@ -53,6 +54,7 @@ public class SegmentReplicationPressureServiceTests extends OpenSearchIndexLevel .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .put(SEGMENT_REPLICATION_INDEXING_PRESSURE_ENABLED.getKey(), true) .put(MAX_REPLICATION_TIME_BACKPRESSURE_SETTING.getKey(), TimeValue.timeValueSeconds(5)) + .put(MAX_INDEXING_CHECKPOINTS.getKey(), 4) .build(); public void testIsSegrepLimitBreached() throws Exception { @@ -200,6 +202,7 @@ public void testFailStaleReplicaTask() throws Exception { .put(SEGMENT_REPLICATION_INDEXING_PRESSURE_ENABLED.getKey(), true) .put(MAX_REPLICATION_TIME_BACKPRESSURE_SETTING.getKey(), TimeValue.timeValueMillis(10)) .put(MAX_REPLICATION_LIMIT_STALE_REPLICA_SETTING.getKey(), TimeValue.timeValueMillis(20)) + .put(MAX_INDEXING_CHECKPOINTS.getKey(), 4) .build(); try (ReplicationGroup shards = createGroup(1, settings, new NRTReplicationEngineFactory())) { From 607a08e465014a9f8615ee30f3d5d402284ea9ff Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Nov 2024 09:56:42 -0500 
Subject: [PATCH 20/30] Bump lycheeverse/lychee-action from 2.0.2 to 2.1.0 (#16610) * Bump lycheeverse/lychee-action from 2.0.2 to 2.1.0 Bumps [lycheeverse/lychee-action](https://github.com/lycheeverse/lychee-action) from 2.0.2 to 2.1.0. - [Release notes](https://github.com/lycheeverse/lychee-action/releases) - [Commits](https://github.com/lycheeverse/lychee-action/compare/v2.0.2...v2.1.0) --- updated-dependencies: - dependency-name: lycheeverse/lychee-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- .github/workflows/links.yml | 2 +- CHANGELOG.md | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/links.yml b/.github/workflows/links.yml index cadbe71bb6ea8..3697750dab97a 100644 --- a/.github/workflows/links.yml +++ b/.github/workflows/links.yml @@ -13,7 +13,7 @@ jobs: - uses: actions/checkout@v4 - name: lychee Link Checker id: lychee - uses: lycheeverse/lychee-action@v2.0.2 + uses: lycheeverse/lychee-action@v2.1.0 with: args: --accept=200,403,429 --exclude-mail **/*.html **/*.md **/*.txt **/*.json --exclude-file .lychee.excludes fail: true diff --git a/CHANGELOG.md b/CHANGELOG.md index a0529d8fa6b63..e95a95990beaf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,6 +20,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.google.apis:google-api-services-compute` from v1-rev20240407-2.0.0 to v1-rev20241021-2.0.0 ([#16502](https://github.com/opensearch-project/OpenSearch/pull/16502), [#16548](https://github.com/opensearch-project/OpenSearch/pull/16548)) - Bump `com.azure:azure-storage-blob` from 12.23.0 to 12.28.1 ([#16501](https://github.com/opensearch-project/OpenSearch/pull/16501)) - Bump `org.apache.hadoop:hadoop-minicluster` from 3.4.0 to 3.4.1 ([#16550](https://github.com/opensearch-project/OpenSearch/pull/16550)) +- Bump `lycheeverse/lychee-action` from 2.0.2 to 2.1.0 ([#16610](https://github.com/opensearch-project/OpenSearch/pull/16610)) ### Changed From 6e34a8024a2b884143f101f03e6ebffab9eed6e6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Nov 2024 12:25:47 -0500 Subject: [PATCH 21/30] Bump me.champeau.gradle.japicmp from 0.4.4 to 0.4.5 in /server (#16614) * Bump me.champeau.gradle.japicmp from 0.4.4 to 0.4.5 in /server Bumps me.champeau.gradle.japicmp from 0.4.4 to 0.4.5. --- updated-dependencies: - dependency-name: me.champeau.gradle.japicmp dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 1 + server/build.gradle | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e95a95990beaf..d3086096cb8f2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,6 +21,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.azure:azure-storage-blob` from 12.23.0 to 12.28.1 ([#16501](https://github.com/opensearch-project/OpenSearch/pull/16501)) - Bump `org.apache.hadoop:hadoop-minicluster` from 3.4.0 to 3.4.1 ([#16550](https://github.com/opensearch-project/OpenSearch/pull/16550)) - Bump `lycheeverse/lychee-action` from 2.0.2 to 2.1.0 ([#16610](https://github.com/opensearch-project/OpenSearch/pull/16610)) +- Bump `me.champeau.gradle.japicmp` from 0.4.4 to 0.4.5 ([#16614](https://github.com/opensearch-project/OpenSearch/pull/16614)) ### Changed diff --git a/server/build.gradle b/server/build.gradle index d3c7d4089125c..f1679ccfbec30 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -36,7 +36,7 @@ plugins { id('opensearch.publish') id('opensearch.internal-cluster-test') id('opensearch.optional-dependencies') - id('me.champeau.gradle.japicmp') version '0.4.4' + id('me.champeau.gradle.japicmp') version '0.4.5' } publishing { From 46ded36a27e276a1644a65b8c8ca439e5cbe4b91 Mon Sep 17 00:00:00 2001 From: Owais Kazi Date: Mon, 11 Nov 2024 10:25:32 -0800 Subject: [PATCH 22/30] Adds Integration Tests for Search Pipeline (#16561) * Adds Integration Tests for Search Pipeline Signed-off-by: Owais * Addressed comments Signed-off-by: Owais --------- Signed-off-by: Owais --- .../common/SearchPipelineCommonIT.java | 186 +++++++++++++++--- 1 file changed, 155 insertions(+), 31 deletions(-) diff --git a/modules/search-pipeline-common/src/internalClusterTest/java/org/opensearch/search/pipeline/common/SearchPipelineCommonIT.java b/modules/search-pipeline-common/src/internalClusterTest/java/org/opensearch/search/pipeline/common/SearchPipelineCommonIT.java index b8b0798812df1..35d748bc2d06e 100644 --- a/modules/search-pipeline-common/src/internalClusterTest/java/org/opensearch/search/pipeline/common/SearchPipelineCommonIT.java +++ b/modules/search-pipeline-common/src/internalClusterTest/java/org/opensearch/search/pipeline/common/SearchPipelineCommonIT.java @@ -8,41 +8,187 @@ package org.opensearch.search.pipeline.common; -import org.opensearch.action.admin.indices.delete.DeleteIndexRequest; import org.opensearch.action.admin.indices.refresh.RefreshRequest; import org.opensearch.action.admin.indices.refresh.RefreshResponse; +import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest; import org.opensearch.action.index.IndexRequest; import org.opensearch.action.index.IndexResponse; import org.opensearch.action.search.DeleteSearchPipelineRequest; +import org.opensearch.action.search.GetSearchPipelineRequest; +import org.opensearch.action.search.GetSearchPipelineResponse; import org.opensearch.action.search.PutSearchPipelineRequest; import org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.common.settings.Settings; import org.opensearch.core.common.bytes.BytesArray; +import 
org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.query.MatchAllQueryBuilder; +import org.opensearch.ingest.PipelineConfiguration; import org.opensearch.plugins.Plugin; import org.opensearch.search.builder.SearchSourceBuilder; import org.opensearch.test.OpenSearchIntegTestCase; +import org.junit.After; +import org.junit.Before; import java.util.Collection; +import java.util.HashMap; import java.util.List; import java.util.Map; @OpenSearchIntegTestCase.SuiteScopeTestCase public class SearchPipelineCommonIT extends OpenSearchIntegTestCase { + private static final String TEST_INDEX = "myindex"; + private static final String PIPELINE_NAME = "test_pipeline"; + @Override protected Collection> nodePlugins() { return List.of(SearchPipelineCommonModulePlugin.class); } + @Before + public void setup() throws Exception { + createIndex(TEST_INDEX); + + IndexRequest doc1 = new IndexRequest(TEST_INDEX).id("doc1").source(Map.of("field", "value")); + IndexRequest doc2 = new IndexRequest(TEST_INDEX).id("doc2").source(Map.of("field", "something else")); + + IndexResponse ir = client().index(doc1).actionGet(); + assertSame(RestStatus.CREATED, ir.status()); + ir = client().index(doc2).actionGet(); + assertSame(RestStatus.CREATED, ir.status()); + + RefreshResponse refRsp = client().admin().indices().refresh(new RefreshRequest(TEST_INDEX)).actionGet(); + assertSame(RestStatus.OK, refRsp.getStatus()); + } + + @After + public void cleanup() throws Exception { + internalCluster().wipeIndices(TEST_INDEX); + } + public void testFilterQuery() { // Create a pipeline with a filter_query processor. - String pipelineName = "foo"; + createPipeline(); + + // Search without the pipeline. Should see both documents. + SearchRequest req = new SearchRequest(TEST_INDEX).source(new SearchSourceBuilder().query(new MatchAllQueryBuilder())); + SearchResponse rsp = client().search(req).actionGet(); + assertEquals(2, rsp.getHits().getTotalHits().value); + + // Search with the pipeline. Should only see document with "field":"value". + req.pipeline(PIPELINE_NAME); + rsp = client().search(req).actionGet(); + assertEquals(1, rsp.getHits().getTotalHits().value); + + // Clean up. + deletePipeline(); + } + + public void testSearchWithTemporaryPipeline() throws Exception { + + // Search without the pipeline. Should see both documents. + SearchRequest req = new SearchRequest(TEST_INDEX).source(new SearchSourceBuilder().query(new MatchAllQueryBuilder())); + SearchResponse rsp = client().search(req).actionGet(); + assertEquals(2, rsp.getHits().getTotalHits().value); + + // Search with temporary pipeline + Map pipelineSourceMap = new HashMap<>(); + Map requestProcessorConfig = new HashMap<>(); + + Map filterQuery = new HashMap<>(); + filterQuery.put("query", Map.of("term", Map.of("field", "value"))); + requestProcessorConfig.put("filter_query", filterQuery); + pipelineSourceMap.put("request_processors", List.of(requestProcessorConfig)); + + req = new SearchRequest(TEST_INDEX).source( + new SearchSourceBuilder().query(new MatchAllQueryBuilder()).searchPipelineSource(pipelineSourceMap) + ); + + SearchResponse rspWithTempPipeline = client().search(req).actionGet(); + assertEquals(1, rspWithTempPipeline.getHits().getTotalHits().value); + } + + public void testSearchWithDefaultPipeline() throws Exception { + // Create pipeline + createPipeline(); + + // Search without the pipeline. Should see both documents. 
+ SearchRequest req = new SearchRequest(TEST_INDEX).source(new SearchSourceBuilder().query(new MatchAllQueryBuilder())); + SearchResponse rsp = client().search(req).actionGet(); + assertEquals(2, rsp.getHits().getTotalHits().value); + + // Set pipeline as default for the index + UpdateSettingsRequest updateSettingsRequest = new UpdateSettingsRequest(TEST_INDEX); + updateSettingsRequest.settings(Settings.builder().put("index.search.default_pipeline", PIPELINE_NAME)); + AcknowledgedResponse updateSettingsResponse = client().admin().indices().updateSettings(updateSettingsRequest).actionGet(); + assertTrue(updateSettingsResponse.isAcknowledged()); + + // Search with the default pipeline. Should only see document with "field":"value". + rsp = client().search(req).actionGet(); + assertEquals(1, rsp.getHits().getTotalHits().value); + + // Clean up: Remove default pipeline setting + updateSettingsRequest = new UpdateSettingsRequest(TEST_INDEX); + updateSettingsRequest.settings(Settings.builder().putNull("index.search.default_pipeline")); + updateSettingsResponse = client().admin().indices().updateSettings(updateSettingsRequest).actionGet(); + assertTrue(updateSettingsResponse.isAcknowledged()); + + // Clean up. + deletePipeline(); + } + + public void testUpdateSearchPipeline() throws Exception { + // Create initial pipeline + createPipeline(); + + // Verify initial pipeline + SearchRequest req = new SearchRequest(TEST_INDEX).source(new SearchSourceBuilder().query(new MatchAllQueryBuilder())); + req.pipeline(PIPELINE_NAME); + SearchResponse initialRsp = client().search(req).actionGet(); + assertEquals(1, initialRsp.getHits().getTotalHits().value); + + BytesReference pipelineConfig = new BytesArray( + "{" + + "\"description\": \"Updated pipeline\"," + + "\"request_processors\": [" + + "{" + + "\"filter_query\" : {" + + "\"query\": {" + + "\"term\" : {" + + "\"field\" : \"something else\"" + + "}" + + "}" + + "}" + + "}" + + "]" + + "}" + ); + + PipelineConfiguration pipeline = new PipelineConfiguration(PIPELINE_NAME, pipelineConfig, MediaTypeRegistry.JSON); + + // Update pipeline + PutSearchPipelineRequest updateRequest = new PutSearchPipelineRequest(pipeline.getId(), pipelineConfig, MediaTypeRegistry.JSON); + AcknowledgedResponse ackRsp = client().admin().cluster().putSearchPipeline(updateRequest).actionGet(); + assertTrue(ackRsp.isAcknowledged()); + + // Verify pipeline description + GetSearchPipelineResponse getPipelineResponse = client().admin() + .cluster() + .getSearchPipeline(new GetSearchPipelineRequest(PIPELINE_NAME)) + .actionGet(); + assertEquals(PIPELINE_NAME, getPipelineResponse.pipelines().get(0).getId()); + assertEquals(pipeline.getConfigAsMap(), getPipelineResponse.pipelines().get(0).getConfigAsMap()); + // Clean up. + deletePipeline(); + } + + private void createPipeline() { PutSearchPipelineRequest putSearchPipelineRequest = new PutSearchPipelineRequest( - pipelineName, + PIPELINE_NAME, new BytesArray( "{" + "\"request_processors\": [" @@ -62,35 +208,13 @@ public void testFilterQuery() { ); AcknowledgedResponse ackRsp = client().admin().cluster().putSearchPipeline(putSearchPipelineRequest).actionGet(); assertTrue(ackRsp.isAcknowledged()); + } - // Index some documents. 
- String indexName = "myindex"; - IndexRequest doc1 = new IndexRequest(indexName).id("doc1").source(Map.of("field", "value")); - IndexRequest doc2 = new IndexRequest(indexName).id("doc2").source(Map.of("field", "something else")); - - IndexResponse ir = client().index(doc1).actionGet(); - assertSame(RestStatus.CREATED, ir.status()); - ir = client().index(doc2).actionGet(); - assertSame(RestStatus.CREATED, ir.status()); - - // Refresh so the documents are visible to search. - RefreshResponse refRsp = client().admin().indices().refresh(new RefreshRequest(indexName)).actionGet(); - assertSame(RestStatus.OK, refRsp.getStatus()); - - // Search without the pipeline. Should see both documents. - SearchRequest req = new SearchRequest(indexName).source(new SearchSourceBuilder().query(new MatchAllQueryBuilder())); - SearchResponse rsp = client().search(req).actionGet(); - assertEquals(2, rsp.getHits().getTotalHits().value); - - // Search with the pipeline. Should only see document with "field":"value". - req.pipeline(pipelineName); - rsp = client().search(req).actionGet(); - assertEquals(1, rsp.getHits().getTotalHits().value); - - // Clean up. - ackRsp = client().admin().cluster().deleteSearchPipeline(new DeleteSearchPipelineRequest(pipelineName)).actionGet(); - assertTrue(ackRsp.isAcknowledged()); - ackRsp = client().admin().indices().delete(new DeleteIndexRequest(indexName)).actionGet(); + private void deletePipeline() { + AcknowledgedResponse ackRsp = client().admin() + .cluster() + .deleteSearchPipeline(new DeleteSearchPipelineRequest(PIPELINE_NAME)) + .actionGet(); assertTrue(ackRsp.isAcknowledged()); } } From c9edb48c1858164cf86c2cc1c7f7d493e01a62bc Mon Sep 17 00:00:00 2001 From: Heemin Kim Date: Mon, 11 Nov 2024 10:59:30 -0800 Subject: [PATCH 23/30] Add a flag in QueryShardContext to differentiate between a normal query and an inner hit query (#16600) Signed-off-by: Heemin Kim --- CHANGELOG.md | 1 + .../org/opensearch/index/query/NestedQueryBuilder.java | 2 ++ .../org/opensearch/index/query/QueryShardContext.java | 9 +++++++++ .../opensearch/index/query/NestedQueryBuilderTests.java | 4 ++++ 4 files changed, 16 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d3086096cb8f2..8d552b5eed83b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Increase segrep pressure checkpoint default limit to 30 ([#16577](https://github.com/opensearch-project/OpenSearch/pull/16577/files)) - Add dynamic setting allowing size > 0 requests to be cached in the request cache ([#16483](https://github.com/opensearch-project/OpenSearch/pull/16483)) - Make IndexStoreListener a pluggable interface ([#16583](https://github.com/opensearch-project/OpenSearch/pull/16583)) +- Add a flag in QueryShardContext to differentiate inner hit query ([#16600](https://github.com/opensearch-project/OpenSearch/pull/16600)) ### Dependencies - Bump `com.azure:azure-storage-common` from 12.25.1 to 12.27.1 ([#16521](https://github.com/opensearch-project/OpenSearch/pull/16521)) diff --git a/server/src/main/java/org/opensearch/index/query/NestedQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/NestedQueryBuilder.java index 5908882472ce7..ec7e62035a82f 100644 --- a/server/src/main/java/org/opensearch/index/query/NestedQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/NestedQueryBuilder.java @@ -413,6 +413,7 @@ protected void doBuild(SearchContext parentSearchContext, InnerHitsContext inner try { 
queryShardContext.setParentFilter(parentFilter); queryShardContext.nestedScope().nextLevel(nestedObjectMapper); + queryShardContext.setInnerHitQuery(true); try { NestedInnerHitSubContext nestedInnerHits = new NestedInnerHitSubContext( name, @@ -427,6 +428,7 @@ protected void doBuild(SearchContext parentSearchContext, InnerHitsContext inner } } finally { queryShardContext.setParentFilter(previousParentFilter); + queryShardContext.setInnerHitQuery(false); } } } diff --git a/server/src/main/java/org/opensearch/index/query/QueryShardContext.java b/server/src/main/java/org/opensearch/index/query/QueryShardContext.java index bccead2b029d0..d717f10b17d9c 100644 --- a/server/src/main/java/org/opensearch/index/query/QueryShardContext.java +++ b/server/src/main/java/org/opensearch/index/query/QueryShardContext.java @@ -126,6 +126,7 @@ public class QueryShardContext extends QueryRewriteContext { private BitSetProducer parentFilter; private DerivedFieldResolver derivedFieldResolver; private boolean keywordIndexOrDocValuesEnabled; + private boolean isInnerHitQuery; public QueryShardContext( int shardId, @@ -727,4 +728,12 @@ public BitSetProducer getParentFilter() { public void setParentFilter(BitSetProducer parentFilter) { this.parentFilter = parentFilter; } + + public boolean isInnerHitQuery() { + return isInnerHitQuery; + } + + public void setInnerHitQuery(boolean isInnerHitQuery) { + this.isInnerHitQuery = isInnerHitQuery; + } } diff --git a/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java index 351011eb1b812..c367d123402d4 100644 --- a/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java @@ -335,6 +335,9 @@ public void testParentFilterFromInlineLeafInnerHitsNestedQuery() throws Exceptio if (context.getParentFilter() == null) { throw new Exception("Expect parent filter to be non-null"); } + if (context.isInnerHitQuery() == false) { + throw new Exception("Expect it to be inner hit query"); + } return invoke.callRealMethod(); }); NestedQueryBuilder query = new NestedQueryBuilder("nested1", innerQueryBuilder, ScoreMode.None); @@ -345,6 +348,7 @@ public void testParentFilterFromInlineLeafInnerHitsNestedQuery() throws Exceptio assertThat(innerHitBuilders.size(), Matchers.equalTo(1)); assertTrue(innerHitBuilders.containsKey(leafInnerHits.getName())); assertNull(queryShardContext.getParentFilter()); + assertFalse(queryShardContext.isInnerHitQuery()); innerHitBuilders.get(leafInnerHits.getName()).build(searchContext, innerHitsContext); assertNull(queryShardContext.getParentFilter()); verify(innerQueryBuilder).toQuery(queryShardContext); From b9d97297f1df4ec532db5b3e90c92636668a9061 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 12 Nov 2024 14:55:42 +0800 Subject: [PATCH 24/30] Bump com.nimbusds:nimbus-jose-jwt from 9.41.1 to 9.46 in /test/fixtures/hdfs-fixture (#16611) * Bump com.nimbusds:nimbus-jose-jwt in /test/fixtures/hdfs-fixture Bumps [com.nimbusds:nimbus-jose-jwt](https://bitbucket.org/connect2id/nimbus-jose-jwt) from 9.41.1 to 9.46. 
- [Changelog](https://bitbucket.org/connect2id/nimbus-jose-jwt/src/master/CHANGELOG.txt) - [Commits](https://bitbucket.org/connect2id/nimbus-jose-jwt/branches/compare/9.46..9.41.1) --- updated-dependencies: - dependency-name: com.nimbusds:nimbus-jose-jwt dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Signed-off-by: gaobinlong Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] Co-authored-by: gaobinlong --- CHANGELOG.md | 1 + test/fixtures/hdfs-fixture/build.gradle | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8d552b5eed83b..f83a1935b8dfc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,6 +21,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.google.apis:google-api-services-compute` from v1-rev20240407-2.0.0 to v1-rev20241021-2.0.0 ([#16502](https://github.com/opensearch-project/OpenSearch/pull/16502), [#16548](https://github.com/opensearch-project/OpenSearch/pull/16548)) - Bump `com.azure:azure-storage-blob` from 12.23.0 to 12.28.1 ([#16501](https://github.com/opensearch-project/OpenSearch/pull/16501)) - Bump `org.apache.hadoop:hadoop-minicluster` from 3.4.0 to 3.4.1 ([#16550](https://github.com/opensearch-project/OpenSearch/pull/16550)) +- Bump `com.nimbusds:nimbus-jose-jwt` from 9.41.1 to 9.46 ([#16611](https://github.com/opensearch-project/OpenSearch/pull/16611)) - Bump `lycheeverse/lychee-action` from 2.0.2 to 2.1.0 ([#16610](https://github.com/opensearch-project/OpenSearch/pull/16610)) - Bump `me.champeau.gradle.japicmp` from 0.4.4 to 0.4.5 ([#16614](https://github.com/opensearch-project/OpenSearch/pull/16614)) diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index 8a402879970d7..bfa6c65ee6ec8 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -79,7 +79,7 @@ dependencies { api "org.jboss.xnio:xnio-nio:3.8.16.Final" api 'org.jline:jline:3.27.1' api 'org.apache.commons:commons-configuration2:2.11.0' - api 'com.nimbusds:nimbus-jose-jwt:9.41.1' + api 'com.nimbusds:nimbus-jose-jwt:9.46' api ('org.apache.kerby:kerb-admin:2.1.0') { exclude group: "org.jboss.xnio" exclude group: "org.jline" From 7f27ddca1c3dc9f4ff6fa4216c7f28171cec3db9 Mon Sep 17 00:00:00 2001 From: Bharathwaj G Date: Tue, 12 Nov 2024 13:03:35 +0530 Subject: [PATCH 25/30] Complete keyword changes for star tree (#16233) --------- Signed-off-by: Bharathwaj G Signed-off-by: bharath-techie --- CHANGELOG.md | 1 + .../index/mapper/StarTreeMapperIT.java | 32 +- .../lucene/index/DocValuesWriterWrapper.java | 18 + .../SortedNumericDocValuesWriterWrapper.java | 11 +- .../SortedSetDocValuesWriterWrapper.java | 58 ++ .../Composite912DocValuesReader.java | 33 +- .../Composite912DocValuesWriter.java | 62 +- .../datacube/DateDimension.java | 9 +- .../datacube/DimensionFactory.java | 28 +- .../datacube/DimensionType.java | 8 +- .../datacube/KeywordDimension.java | 82 +++ .../datacube/NumericDimension.java | 10 +- .../datacube/ReadDimension.java | 17 +- .../datacube/startree/StarTreeField.java | 12 +- .../startree/builder/BaseStarTreeBuilder.java | 228 ++++++- .../builder/OffHeapStarTreeBuilder.java | 16 +- .../builder/OnHeapStarTreeBuilder.java | 17 +- .../startree/builder/StarTreeBuilder.java | 1 + 
.../builder/StarTreeDocsFileManager.java | 3 + .../startree/builder/StarTreesBuilder.java | 8 +- .../fileformats/meta/StarTreeMetadata.java | 46 +- .../meta/StarTreeMetadataWriter.java | 26 +- .../startree/index/StarTreeValues.java | 62 +- .../utils/SequentialDocValuesIterator.java | 29 + .../startree/utils/StarTreeUtils.java | 12 +- .../SortedSetStarTreeValuesIterator.java | 59 ++ .../index/mapper/KeywordFieldMapper.java | 7 + .../SortedSetDocValuesWriterWrapperTests.java | 98 +++ .../AbstractStarTreeDVFormatTests.java | 126 ++++ .../StarTreeDocValuesFormatTests.java | 143 +---- .../StarTreeKeywordDocValuesFormatTests.java | 572 ++++++++++++++++++ .../datacube/startree/StarTreeTestUtils.java | 4 + .../builder/BaseStarTreeBuilderTests.java | 2 +- .../startree/builder/BuilderTestsUtils.java | 67 +- .../builder/StarTreeBuildMetricTests.java | 21 +- .../StarTreeBuilderFlushFlowTests.java | 143 ++++- .../StarTreeBuilderMergeFlowTests.java | 249 +++++++- .../builder/StarTreeBuilderTestCase.java | 34 +- .../meta/StarTreeMetadataTests.java | 8 +- .../startree/utils/StarTreeUtilsTests.java | 3 +- .../index/mapper/ObjectMapperTests.java | 11 +- .../index/mapper/StarTreeMapperTests.java | 43 +- .../search/SearchServiceStarTreeTests.java | 4 +- .../startree/MetricAggregatorTests.java | 2 +- .../startree/StarTreeFilterTests.java | 2 +- 45 files changed, 2120 insertions(+), 307 deletions(-) create mode 100644 server/src/main/java/org/apache/lucene/index/DocValuesWriterWrapper.java create mode 100644 server/src/main/java/org/apache/lucene/index/SortedSetDocValuesWriterWrapper.java create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/KeywordDimension.java create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/iterator/SortedSetStarTreeValuesIterator.java create mode 100644 server/src/test/java/org/opensearch/index/codec/composite/SortedSetDocValuesWriterWrapperTests.java create mode 100644 server/src/test/java/org/opensearch/index/codec/composite912/datacube/startree/AbstractStarTreeDVFormatTests.java create mode 100644 server/src/test/java/org/opensearch/index/codec/composite912/datacube/startree/StarTreeKeywordDocValuesFormatTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index f83a1935b8dfc..30b1d5908c1a7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Increase segrep pressure checkpoint default limit to 30 ([#16577](https://github.com/opensearch-project/OpenSearch/pull/16577/files)) - Add dynamic setting allowing size > 0 requests to be cached in the request cache ([#16483](https://github.com/opensearch-project/OpenSearch/pull/16483)) - Make IndexStoreListener a pluggable interface ([#16583](https://github.com/opensearch-project/OpenSearch/pull/16583)) +- Support for keyword fields in star-tree index ([#16233](https://github.com/opensearch-project/OpenSearch/pull/16233)) - Add a flag in QueryShardContext to differentiate inner hit query ([#16600](https://github.com/opensearch-project/OpenSearch/pull/16600)) ### Dependencies diff --git a/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java b/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java index 5840884f5422a..c91c4d7bbb63b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java @@ -56,7 +56,7 @@ public class StarTreeMapperIT extends OpenSearchIntegTestCase { .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(512, ByteSizeUnit.MB)) .build(); - private static XContentBuilder createMinimalTestMapping(boolean invalidDim, boolean invalidMetric, boolean keywordDim) { + private static XContentBuilder createMinimalTestMapping(boolean invalidDim, boolean invalidMetric, boolean ipdim) { try { return jsonBuilder().startObject() .startObject("composite") @@ -68,12 +68,15 @@ private static XContentBuilder createMinimalTestMapping(boolean invalidDim, bool .endObject() .startArray("ordered_dimensions") .startObject() - .field("name", getDim(invalidDim, keywordDim)) + .field("name", getDim(invalidDim, ipdim)) + .endObject() + .startObject() + .field("name", "keyword_dv") .endObject() .endArray() .startArray("metrics") .startObject() - .field("name", getDim(invalidMetric, false)) + .field("name", getMetric(invalidMetric, false)) .endObject() .endArray() .endObject() @@ -99,6 +102,10 @@ private static XContentBuilder createMinimalTestMapping(boolean invalidDim, bool .field("type", "keyword") .field("doc_values", false) .endObject() + .startObject("ip") + .field("type", "ip") + .field("doc_values", false) + .endObject() .endObject() .endObject(); } catch (IOException e) { @@ -356,10 +363,19 @@ private XContentBuilder getMappingWithDuplicateFields(boolean isDuplicateDim, bo } private static String getDim(boolean hasDocValues, boolean isKeyword) { + if (hasDocValues) { + return random().nextBoolean() ? "numeric" : "keyword"; + } else if (isKeyword) { + return "ip"; + } + return "numeric_dv"; + } + + private static String getMetric(boolean hasDocValues, boolean isKeyword) { if (hasDocValues) { return "numeric"; } else if (isKeyword) { - return "keyword"; + return "ip"; } return "numeric_dv"; } @@ -398,6 +414,7 @@ public void testValidCompositeIndex() { assertEquals(expectedTimeUnits.get(i).shortName(), dateDim.getSortedCalendarIntervals().get(i).shortName()); } assertEquals("numeric_dv", starTreeFieldType.getDimensions().get(1).getField()); + assertEquals("keyword_dv", starTreeFieldType.getDimensions().get(2).getField()); assertEquals("numeric_dv", starTreeFieldType.getMetrics().get(0).getField()); List expectedMetrics = Arrays.asList(MetricStat.VALUE_COUNT, MetricStat.SUM, MetricStat.AVG); assertEquals(expectedMetrics, starTreeFieldType.getMetrics().get(0).getMetrics()); @@ -665,10 +682,7 @@ public void testInvalidDimCompositeIndex() { IllegalArgumentException.class, () -> prepareCreate(TEST_INDEX).setSettings(settings).setMapping(createMinimalTestMapping(true, false, false)).get() ); - assertEquals( - "Aggregations not supported for the dimension field [numeric] with field type [integer] as part of star tree field", - ex.getMessage() - ); + assertTrue(ex.getMessage().startsWith("Aggregations not supported for the dimension field ")); } public void testMaxDimsCompositeIndex() { @@ -734,7 +748,7 @@ public void testUnsupportedDim() { () -> prepareCreate(TEST_INDEX).setSettings(settings).setMapping(createMinimalTestMapping(false, false, true)).get() ); assertEquals( - "Failed to parse mapping [_doc]: unsupported field type associated with dimension [keyword] as part of star tree field [startree-1]", + "Failed to parse mapping [_doc]: unsupported field type associated with dimension [ip] as part of star tree field [startree-1]", ex.getMessage() ); } diff --git 
a/server/src/main/java/org/apache/lucene/index/DocValuesWriterWrapper.java b/server/src/main/java/org/apache/lucene/index/DocValuesWriterWrapper.java new file mode 100644 index 0000000000000..5329bad776e43 --- /dev/null +++ b/server/src/main/java/org/apache/lucene/index/DocValuesWriterWrapper.java @@ -0,0 +1,18 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.apache.lucene.index; + +import org.apache.lucene.search.DocIdSetIterator; + +/** + * Base wrapper class for DocValuesWriter. + */ +public interface DocValuesWriterWrapper { + T getDocValues(); +} diff --git a/server/src/main/java/org/apache/lucene/index/SortedNumericDocValuesWriterWrapper.java b/server/src/main/java/org/apache/lucene/index/SortedNumericDocValuesWriterWrapper.java index f7759fcced284..582e4c3f87f98 100644 --- a/server/src/main/java/org/apache/lucene/index/SortedNumericDocValuesWriterWrapper.java +++ b/server/src/main/java/org/apache/lucene/index/SortedNumericDocValuesWriterWrapper.java @@ -18,9 +18,9 @@ * * @opensearch.experimental */ -public class SortedNumericDocValuesWriterWrapper { +public class SortedNumericDocValuesWriterWrapper implements DocValuesWriterWrapper { - private final SortedNumericDocValuesWriter sortedNumericDocValuesWriter; + private final SortedNumericDocValuesWriter sortedNumericDocValuesWriterDelegate; /** * Sole constructor. Constructs a new {@link SortedNumericDocValuesWriterWrapper} instance. @@ -29,7 +29,7 @@ public class SortedNumericDocValuesWriterWrapper { * @param counter a counter for tracking memory usage */ public SortedNumericDocValuesWriterWrapper(FieldInfo fieldInfo, Counter counter) { - sortedNumericDocValuesWriter = new SortedNumericDocValuesWriter(fieldInfo, counter); + sortedNumericDocValuesWriterDelegate = new SortedNumericDocValuesWriter(fieldInfo, counter); } /** @@ -39,7 +39,7 @@ public SortedNumericDocValuesWriterWrapper(FieldInfo fieldInfo, Counter counter) * @param value the value to add */ public void addValue(int docID, long value) { - sortedNumericDocValuesWriter.addValue(docID, value); + sortedNumericDocValuesWriterDelegate.addValue(docID, value); } /** @@ -47,7 +47,8 @@ public void addValue(int docID, long value) { * * @return the {@link SortedNumericDocValues} instance */ + @Override public SortedNumericDocValues getDocValues() { - return sortedNumericDocValuesWriter.getDocValues(); + return sortedNumericDocValuesWriterDelegate.getDocValues(); } } diff --git a/server/src/main/java/org/apache/lucene/index/SortedSetDocValuesWriterWrapper.java b/server/src/main/java/org/apache/lucene/index/SortedSetDocValuesWriterWrapper.java new file mode 100644 index 0000000000000..95aa242535e48 --- /dev/null +++ b/server/src/main/java/org/apache/lucene/index/SortedSetDocValuesWriterWrapper.java @@ -0,0 +1,58 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.apache.lucene.index; + +import org.apache.lucene.util.ByteBlockPool; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.Counter; + +/** + * A wrapper class for writing sorted set doc values. + *
<p>
+ * This class provides a convenient way to add sorted set doc values to a field + * and retrieve the corresponding {@link SortedSetDocValues} instance. + * + * @opensearch.experimental + */ +public class SortedSetDocValuesWriterWrapper implements DocValuesWriterWrapper { + + private final SortedSetDocValuesWriter sortedSetDocValuesWriterDelegate; + + /** + * Sole constructor. Constructs a new {@link SortedSetDocValuesWriterWrapper} instance. + * + * @param fieldInfo the field information for the field being written + * @param counter a counter for tracking memory usage + * @param byteBlockPool a byte block pool for allocating byte blocks + * @see SortedSetDocValuesWriter + */ + public SortedSetDocValuesWriterWrapper(FieldInfo fieldInfo, Counter counter, ByteBlockPool byteBlockPool) { + sortedSetDocValuesWriterDelegate = new SortedSetDocValuesWriter(fieldInfo, counter, byteBlockPool); + } + + /** + * Adds a bytes ref value to the sorted set doc values for the specified document. + * + * @param docID the document ID + * @param value the value to add + */ + public void addValue(int docID, BytesRef value) { + sortedSetDocValuesWriterDelegate.addValue(docID, value); + } + + /** + * Returns the {@link SortedSetDocValues} instance containing the sorted numeric doc values + * + * @return the {@link SortedSetDocValues} instance + */ + @Override + public SortedSetDocValues getDocValues() { + return sortedSetDocValuesWriterDelegate.getDocValues(); + } +} diff --git a/server/src/main/java/org/opensearch/index/codec/composite/composite912/Composite912DocValuesReader.java b/server/src/main/java/org/opensearch/index/codec/composite/composite912/Composite912DocValuesReader.java index 637d3250fda3f..38d3f4867e89b 100644 --- a/server/src/main/java/org/opensearch/index/codec/composite/composite912/Composite912DocValuesReader.java +++ b/server/src/main/java/org/opensearch/index/codec/composite/composite912/Composite912DocValuesReader.java @@ -14,7 +14,7 @@ import org.apache.lucene.codecs.DocValuesProducer; import org.apache.lucene.index.BinaryDocValues; import org.apache.lucene.index.CorruptIndexException; -import org.apache.lucene.index.DocValues; +import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.FieldInfos; import org.apache.lucene.index.IndexFileNames; @@ -40,6 +40,7 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.HashMap; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; @@ -111,7 +112,7 @@ public Composite912DocValuesReader(DocValuesProducer producer, SegmentReadState readState.segmentInfo.getId(), readState.segmentSuffix ); - + Map dimensionFieldTypeMap = new HashMap<>(); while (true) { // validate magic marker @@ -155,13 +156,16 @@ public Composite912DocValuesReader(DocValuesProducer producer, SegmentReadState compositeIndexInputMap.put(compositeFieldName, starTreeIndexInput); compositeIndexMetadataMap.put(compositeFieldName, starTreeMetadata); - List dimensionFields = starTreeMetadata.getDimensionFields(); - + Map dimensionFieldToDocValuesMap = starTreeMetadata.getDimensionFields(); // generating star tree unique fields (fully qualified name for dimension and metrics) - for (String dimensions : dimensionFields) { - fields.add(fullyQualifiedFieldNameForStarTreeDimensionsDocValues(compositeFieldName, dimensions)); + for (Map.Entry dimensionEntry : dimensionFieldToDocValuesMap.entrySet()) { + String dimName = fullyQualifiedFieldNameForStarTreeDimensionsDocValues( + 
compositeFieldName, + dimensionEntry.getKey() + ); + fields.add(dimName); + dimensionFieldTypeMap.put(dimName, dimensionEntry.getValue()); } - // adding metric fields for (Metric metric : starTreeMetadata.getMetrics()) { for (MetricStat metricStat : metric.getBaseMetrics()) { @@ -184,7 +188,7 @@ public Composite912DocValuesReader(DocValuesProducer producer, SegmentReadState // populates the dummy list of field infos to fetch doc id set iterators for respective fields. // the dummy field info is used to fetch the doc id set iterators for respective fields based on field name - FieldInfos fieldInfos = new FieldInfos(getFieldInfoList(fields)); + FieldInfos fieldInfos = new FieldInfos(getFieldInfoList(fields, dimensionFieldTypeMap)); this.readState = new SegmentReadState( readState.directory, readState.segmentInfo, @@ -291,17 +295,4 @@ public CompositeIndexValues getCompositeIndexValues(CompositeIndexFieldInfo comp } - /** - * Returns the sorted numeric doc values for the given sorted numeric field. - * If the sorted numeric field is null, it returns an empty doc id set iterator. - *
<p>
- * Sorted numeric field can be null for cases where the segment doesn't hold a particular value. - * - * @param sortedNumeric the sorted numeric doc values for a field - * @return empty sorted numeric values if the field is not present, else sortedNumeric - */ - public static SortedNumericDocValues getSortedNumericDocValues(SortedNumericDocValues sortedNumeric) { - return sortedNumeric == null ? DocValues.emptySortedNumeric() : sortedNumeric; - } - } diff --git a/server/src/main/java/org/opensearch/index/codec/composite/composite912/Composite912DocValuesWriter.java b/server/src/main/java/org/opensearch/index/codec/composite/composite912/Composite912DocValuesWriter.java index dd35091dece2f..904d6a7aba5c6 100644 --- a/server/src/main/java/org/opensearch/index/codec/composite/composite912/Composite912DocValuesWriter.java +++ b/server/src/main/java/org/opensearch/index/codec/composite/composite912/Composite912DocValuesWriter.java @@ -22,6 +22,7 @@ import org.apache.lucene.index.SegmentInfo; import org.apache.lucene.index.SegmentWriteState; import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.store.IndexOutput; import org.opensearch.common.annotation.ExperimentalApi; @@ -29,12 +30,12 @@ import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; import org.opensearch.index.codec.composite.CompositeIndexReader; import org.opensearch.index.codec.composite.LuceneDocValuesConsumerFactory; -import org.opensearch.index.compositeindex.datacube.startree.StarTreeField; import org.opensearch.index.compositeindex.datacube.startree.builder.StarTreesBuilder; import org.opensearch.index.compositeindex.datacube.startree.index.CompositeIndexValues; import org.opensearch.index.compositeindex.datacube.startree.index.StarTreeValues; import org.opensearch.index.mapper.CompositeMappedFieldType; import org.opensearch.index.mapper.DocCountFieldMapper; +import org.opensearch.index.mapper.KeywordFieldMapper; import org.opensearch.index.mapper.MapperService; import java.io.IOException; @@ -71,6 +72,7 @@ public class Composite912DocValuesWriter extends DocValuesConsumer { private final AtomicInteger fieldNumberAcrossCompositeFields; private final Map fieldProducerMap = new HashMap<>(); + private final Map fieldDocIdSetIteratorMap = new HashMap<>(); public Composite912DocValuesWriter(DocValuesConsumer delegate, SegmentWriteState segmentWriteState, MapperService mapperService) throws IOException { @@ -82,14 +84,7 @@ public Composite912DocValuesWriter(DocValuesConsumer delegate, SegmentWriteState this.compositeMappedFieldTypes = mapperService.getCompositeFieldTypes(); compositeFieldSet = new HashSet<>(); segmentFieldSet = new HashSet<>(); - // TODO : add integ test for this - for (FieldInfo fi : this.state.fieldInfos) { - if (DocValuesType.SORTED_NUMERIC.equals(fi.getDocValuesType())) { - segmentFieldSet.add(fi.name); - } else if (fi.name.equals(DocCountFieldMapper.NAME)) { - segmentFieldSet.add(fi.name); - } - } + addStarTreeSupportedFieldsFromSegment(); for (CompositeMappedFieldType type : compositeMappedFieldTypes) { compositeFieldSet.addAll(type.fields()); } @@ -148,6 +143,17 @@ public Composite912DocValuesWriter(DocValuesConsumer delegate, SegmentWriteState segmentHasCompositeFields = Collections.disjoint(segmentFieldSet, compositeFieldSet) == false; } + private void addStarTreeSupportedFieldsFromSegment() { + // TODO : add integ test for this + for (FieldInfo fi : 
this.state.fieldInfos) { + if (DocValuesType.SORTED_NUMERIC.equals(fi.getDocValuesType()) + || DocValuesType.SORTED_SET.equals(fi.getDocValuesType()) + || fi.name.equals(DocCountFieldMapper.NAME)) { + segmentFieldSet.add(fi.name); + } + } + } + @Override public void addNumericField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException { delegate.addNumericField(field, valuesProducer); @@ -179,6 +185,15 @@ public void addSortedNumericField(FieldInfo field, DocValuesProducer valuesProdu @Override public void addSortedSetField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException { delegate.addSortedSetField(field, valuesProducer); + // Perform this only during flush flow + if (mergeState.get() == null && segmentHasCompositeFields) { + createCompositeIndicesIfPossible(valuesProducer, field); + } + if (mergeState.get() != null) { + if (compositeFieldSet.contains(field.name)) { + fieldDocIdSetIteratorMap.put(field.name, valuesProducer.getSortedSet(field)); + } + } } @Override @@ -231,6 +246,7 @@ private void createCompositeIndicesIfPossible(DocValuesProducer valuesProducer, * Add empty doc values for fields not present in segment */ private void addDocValuesForEmptyField(String compositeField) { + // special case for doc count if (compositeField.equals(DocCountFieldMapper.NAME)) { fieldProducerMap.put(compositeField, new EmptyDocValuesProducer() { @Override @@ -239,16 +255,31 @@ public NumericDocValues getNumeric(FieldInfo field) { } }); } else { - fieldProducerMap.put(compositeField, new EmptyDocValuesProducer() { - @Override - public SortedNumericDocValues getSortedNumeric(FieldInfo field) { - return DocValues.emptySortedNumeric(); - } - }); + if (isSortedSetField(compositeField)) { + fieldProducerMap.put(compositeField, new EmptyDocValuesProducer() { + @Override + public SortedSetDocValues getSortedSet(FieldInfo field) { + return DocValues.emptySortedSet(); + } + }); + } + // TODO : change this logic to evaluate for sortedNumericField specifically + else { + fieldProducerMap.put(compositeField, new EmptyDocValuesProducer() { + @Override + public SortedNumericDocValues getSortedNumeric(FieldInfo field) { + return DocValues.emptySortedNumeric(); + } + }); + } } compositeFieldSet.remove(compositeField); } + private boolean isSortedSetField(String field) { + return mapperService.fieldType(field) instanceof KeywordFieldMapper.KeywordFieldType; + } + @Override public void merge(MergeState mergeState) throws IOException { this.mergeState.compareAndSet(null, mergeState); @@ -272,7 +303,6 @@ private void mergeCompositeFields(MergeState mergeState) throws IOException { */ private void mergeStarTreeFields(MergeState mergeState) throws IOException { Map> starTreeSubsPerField = new HashMap<>(); - StarTreeField starTreeField = null; for (int i = 0; i < mergeState.docValuesProducers.length; i++) { CompositeIndexReader reader = null; if (mergeState.docValuesProducers[i] == null) { diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/DateDimension.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/DateDimension.java index 8feb9ccd27dbd..88a67e1134067 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/DateDimension.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/DateDimension.java @@ -99,6 +99,11 @@ public List getSubDimensionNames() { return fields; } + @Override + public DocValuesType getDocValuesType() { + return DocValuesType.SORTED_NUMERIC; + } + @Override public 
XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject("date_dimension"); @@ -170,8 +175,4 @@ public int compare(DateTimeUnitRounding unit1, DateTimeUnitRounding unit2) { public static List getSortedDateTimeUnits(List dateTimeUnits) { return dateTimeUnits.stream().sorted(new DateTimeUnitComparator()).collect(Collectors.toList()); } - - public DocValuesType getDocValuesType() { - return DocValuesType.SORTED_NUMERIC; - } } diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/DimensionFactory.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/DimensionFactory.java index 7e72a3f0d9de6..e834706e2fa9d 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/DimensionFactory.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/DimensionFactory.java @@ -24,6 +24,7 @@ import java.util.stream.Collectors; import static org.opensearch.index.compositeindex.datacube.DateDimension.CALENDAR_INTERVALS; +import static org.opensearch.index.compositeindex.datacube.KeywordDimension.KEYWORD; /** * Dimension factory class mainly used to parse and create dimension from the mappings @@ -43,6 +44,8 @@ public static Dimension parseAndCreateDimension( return parseAndCreateDateDimension(name, dimensionMap, c); case NumericDimension.NUMERIC: return new NumericDimension(name); + case KEYWORD: + return new KeywordDimension(name); default: throw new IllegalArgumentException( String.format(Locale.ROOT, "unsupported field type associated with dimension [%s] as part of star tree field", name) @@ -56,16 +59,23 @@ public static Dimension parseAndCreateDimension( Map dimensionMap, Mapper.TypeParser.ParserContext c ) { - if (builder.getSupportedDataCubeDimensionType().isPresent() - && builder.getSupportedDataCubeDimensionType().get().equals(DimensionType.DATE)) { - return parseAndCreateDateDimension(name, dimensionMap, c); - } else if (builder.getSupportedDataCubeDimensionType().isPresent() - && builder.getSupportedDataCubeDimensionType().get().equals(DimensionType.NUMERIC)) { + if (builder.getSupportedDataCubeDimensionType().isEmpty()) { + throw new IllegalArgumentException( + String.format(Locale.ROOT, "unsupported field type associated with star tree dimension [%s]", name) + ); + } + switch (builder.getSupportedDataCubeDimensionType().get()) { + case DATE: + return parseAndCreateDateDimension(name, dimensionMap, c); + case NUMERIC: return new NumericDimension(name); - } - throw new IllegalArgumentException( - String.format(Locale.ROOT, "unsupported field type associated with star tree dimension [%s]", name) - ); + case KEYWORD: + return new KeywordDimension(name); + default: + throw new IllegalArgumentException( + String.format(Locale.ROOT, "unsupported field type associated with star tree dimension [%s]", name) + ); + } } private static DateDimension parseAndCreateDateDimension( diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/DimensionType.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/DimensionType.java index 4b9faea331752..d327f8ca1fa1e 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/DimensionType.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/DimensionType.java @@ -27,5 +27,11 @@ public enum DimensionType { * Represents a date dimension type. * This is used for dimensions that contain date or timestamp values. 
*/ - DATE + DATE, + + /** + * Represents a keyword dimension type. + * This is used for dimensions that contain keyword ordinals. + */ + KEYWORD } diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/KeywordDimension.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/KeywordDimension.java new file mode 100644 index 0000000000000..58e248fd548d6 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/KeywordDimension.java @@ -0,0 +1,82 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.compositeindex.datacube; + +import org.apache.lucene.index.DocValuesType; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.mapper.CompositeDataCubeFieldType; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; +import java.util.function.Consumer; + +/** + * Composite index keyword dimension class + * + * @opensearch.experimental + */ +@ExperimentalApi +public class KeywordDimension implements Dimension { + public static final String KEYWORD = "keyword"; + private final String field; + + public KeywordDimension(String field) { + this.field = field; + } + + @Override + public String getField() { + return field; + } + + @Override + public int getNumSubDimensions() { + return 1; + } + + @Override + public void setDimensionValues(Long value, Consumer dimSetter) { + // This will set the keyword dimension value's ordinal + dimSetter.accept(value); + } + + @Override + public List getSubDimensionNames() { + return List.of(field); + } + + @Override + public DocValuesType getDocValuesType() { + return DocValuesType.SORTED_SET; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(CompositeDataCubeFieldType.NAME, field); + builder.field(CompositeDataCubeFieldType.TYPE, KEYWORD); + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + KeywordDimension dimension = (KeywordDimension) o; + return Objects.equals(field, dimension.getField()); + } + + @Override + public int hashCode() { + return Objects.hash(field); + } +} diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/NumericDimension.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/NumericDimension.java index f1d1b15337f4a..fe9e3d17c0047 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/NumericDimension.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/NumericDimension.java @@ -51,6 +51,11 @@ public List getSubDimensionNames() { return List.of(field); } + @Override + public DocValuesType getDocValuesType() { + return DocValuesType.SORTED_NUMERIC; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); @@ -72,9 +77,4 @@ public boolean equals(Object o) { public int hashCode() { return Objects.hash(field); } - - @Override - public DocValuesType getDocValuesType() { - return DocValuesType.SORTED_NUMERIC; - } } diff --git 
a/server/src/main/java/org/opensearch/index/compositeindex/datacube/ReadDimension.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/ReadDimension.java index 0e2ec086abc0a..384553a8f7e06 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/ReadDimension.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/ReadDimension.java @@ -25,9 +25,16 @@ public class ReadDimension implements Dimension { public static final String READ = "read"; private final String field; + private final DocValuesType docValuesType; public ReadDimension(String field) { this.field = field; + this.docValuesType = DocValuesType.SORTED_NUMERIC; + } + + public ReadDimension(String field, DocValuesType docValuesType) { + this.field = field; + this.docValuesType = docValuesType; } public String getField() { @@ -49,6 +56,11 @@ public List getSubDimensionNames() { return List.of(field); } + @Override + public DocValuesType getDocValuesType() { + return docValuesType; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); @@ -70,9 +82,4 @@ public boolean equals(Object o) { public int hashCode() { return Objects.hash(field); } - - @Override - public DocValuesType getDocValuesType() { - return DocValuesType.SORTED_NUMERIC; - } } diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeField.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeField.java index 833bf63c04a18..37b59fc1f59c8 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeField.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeField.java @@ -8,6 +8,7 @@ package org.opensearch.index.compositeindex.datacube.startree; +import org.apache.lucene.index.DocValuesType; import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; @@ -33,6 +34,7 @@ public class StarTreeField implements ToXContent { private final List metrics; private final StarTreeFieldConfiguration starTreeConfig; private final List dimensionNames; + private final List dimensionDocValueTypes; private final List metricNames; public StarTreeField(String name, List dimensions, List metrics, StarTreeFieldConfiguration starTreeConfig) { @@ -41,8 +43,12 @@ public StarTreeField(String name, List dimensions, List metri this.metrics = metrics; this.starTreeConfig = starTreeConfig; dimensionNames = new ArrayList<>(); + dimensionDocValueTypes = new ArrayList<>(); for (Dimension dimension : dimensions) { - dimensionNames.addAll(dimension.getSubDimensionNames()); + for (String dimensionName : dimension.getSubDimensionNames()) { + dimensionNames.add(dimensionName); + dimensionDocValueTypes.add(dimension.getDocValuesType()); + } } metricNames = new ArrayList<>(); for (Metric metric : metrics) { @@ -64,6 +70,10 @@ public List getDimensionNames() { return dimensionNames; } + public List getDimensionDocValueTypes() { + return dimensionDocValueTypes; + } + public List getMetricNames() { return metricNames; } diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilder.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilder.java index 3054e8e66b601..cf36f2d7d4126 100644 --- 
a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilder.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilder.java @@ -13,15 +13,23 @@ import org.apache.lucene.codecs.DocValuesProducer; import org.apache.lucene.index.DocValues; import org.apache.lucene.index.DocValuesType; +import org.apache.lucene.index.DocValuesWriterWrapper; import org.apache.lucene.index.EmptyDocValuesProducer; import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.OrdinalMap; import org.apache.lucene.index.SegmentWriteState; import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.index.SortedNumericDocValuesWriterWrapper; +import org.apache.lucene.index.SortedSetDocValues; +import org.apache.lucene.index.SortedSetDocValuesWriterWrapper; +import org.apache.lucene.index.TermsEnum; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.store.IndexOutput; +import org.apache.lucene.util.ByteBlockPool; import org.apache.lucene.util.Counter; +import org.apache.lucene.util.LongValues; import org.apache.lucene.util.NumericUtils; +import org.apache.lucene.util.packed.PackedInts; import org.opensearch.index.compositeindex.datacube.Dimension; import org.opensearch.index.compositeindex.datacube.Metric; import org.opensearch.index.compositeindex.datacube.MetricStat; @@ -36,6 +44,7 @@ import org.opensearch.index.compositeindex.datacube.startree.node.StarTreeNodeType; import org.opensearch.index.compositeindex.datacube.startree.utils.SequentialDocValuesIterator; import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.SortedNumericStarTreeValuesIterator; +import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.SortedSetStarTreeValuesIterator; import org.opensearch.index.mapper.DocCountFieldMapper; import org.opensearch.index.mapper.FieldMapper; import org.opensearch.index.mapper.FieldValueConverter; @@ -45,6 +54,8 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.List; @@ -86,12 +97,20 @@ public abstract class BaseStarTreeBuilder implements StarTreeBuilder { protected final int maxLeafDocuments; List dimensionsSplitOrder = new ArrayList<>(); protected final InMemoryTreeNode rootNode = getNewNode(); - protected final StarTreeField starTreeField; private final SegmentWriteState writeState; private final IndexOutput metaOut; private final IndexOutput dataOut; + private final Counter bytesUsed = Counter.newCounter(); + private Map flushSortedSetDocValuesMap = new HashMap<>(); + // Maintains list of sortedSetDocValues for each star tree dimension field across segments during merge + private Map> mergeSortedSetDimensionsMap = new HashMap<>(); + // Maintains ordinalMap for each star tree dimension field during merge + private Map mergeSortedSetDimensionsOrdinalMap = new HashMap<>(); + + // This should be true for merge flows + protected boolean isMerge = false; /** * Reads all the configuration related to dimensions and metrics, builds a star-tree based on the different construction parameters. 
@@ -233,11 +252,23 @@ public void build( String dimension = dimensionsSplitOrder.get(i).getField(); FieldInfo dimensionFieldInfo = writeState.fieldInfos.fieldInfo(dimension); if (dimensionFieldInfo == null) { - dimensionFieldInfo = getFieldInfo(dimension, DocValuesType.SORTED_NUMERIC); + dimensionFieldInfo = getFieldInfo(dimension, dimensionsSplitOrder.get(i).getDocValuesType()); } - dimensionReaders[i] = new SequentialDocValuesIterator( - new SortedNumericStarTreeValuesIterator(fieldProducerMap.get(dimensionFieldInfo.name).getSortedNumeric(dimensionFieldInfo)) + dimensionReaders[i] = getSequentialDocValuesIterator( + dimensionFieldInfo, + fieldProducerMap, + dimensionsSplitOrder.get(i).getDocValuesType() ); + + if (dimensionsSplitOrder.get(i).getDocValuesType().equals(DocValuesType.SORTED_SET)) { + // This is needed as we need to write the ordinals and also the bytesRef associated with it + // as part of star tree doc values file formats + flushSortedSetDocValuesMap.put( + dimensionsSplitOrder.get(i).getField(), + fieldProducerMap.get(dimensionFieldInfo.name).getSortedSet(dimensionFieldInfo) + ); + } + } Iterator starTreeDocumentIterator = sortAndAggregateSegmentDocuments(dimensionReaders, metricReaders); logger.debug("Sorting and aggregating star-tree in ms : {}", (System.currentTimeMillis() - startTime)); @@ -245,6 +276,72 @@ public void build( logger.debug("Finished Building star-tree in ms : {}", (System.currentTimeMillis() - startTime)); } + /** + * Returns the sequential doc values iterator for the given field based on associated docValuesType + */ + private SequentialDocValuesIterator getSequentialDocValuesIterator( + FieldInfo fieldInfo, + Map fieldProducerMap, + DocValuesType type + ) throws IOException { + switch (type) { + case SORTED_NUMERIC: + return new SequentialDocValuesIterator( + new SortedNumericStarTreeValuesIterator(fieldProducerMap.get(fieldInfo.name).getSortedNumeric(fieldInfo)) + ); + case SORTED_SET: + return new SequentialDocValuesIterator( + new SortedSetStarTreeValuesIterator(fieldProducerMap.get(fieldInfo.name).getSortedSet(fieldInfo)) + ); + default: + throw new IllegalArgumentException("Unsupported type: " + type); + } + } + + /** + * Returns the ordinal map per field based on given star-tree values across different segments + */ + protected Map getOrdinalMaps(List starTreeValuesSubs) throws IOException { + long curr = System.currentTimeMillis(); + Map> dimensionToIterators = new HashMap<>(); + // Group iterators by dimension + for (StarTreeValues starTree : starTreeValuesSubs) { + for (String dimName : starTree.getStarTreeField().getDimensionNames()) { + if (starTree.getDimensionValuesIterator(dimName) instanceof SortedSetStarTreeValuesIterator) { + dimensionToIterators.computeIfAbsent(dimName, k -> new ArrayList<>()) + .add((SortedSetStarTreeValuesIterator) starTree.getDimensionValuesIterator(dimName)); + } + } + } + + if (dimensionToIterators.isEmpty()) return Collections.emptyMap(); + this.mergeSortedSetDimensionsMap = dimensionToIterators; + Map dimensionToOrdinalMap = new HashMap<>(); + for (Map.Entry> entry : dimensionToIterators.entrySet()) { + String dimName = entry.getKey(); + List iterators = entry.getValue(); + + // step 1: iterate through each sub and mark terms still in use + TermsEnum[] liveTerms = new TermsEnum[iterators.size()]; + long[] weights = new long[liveTerms.length]; + + for (int sub = 0; sub < liveTerms.length; sub++) { + SortedSetStarTreeValuesIterator dv = iterators.get(sub); + liveTerms[sub] = dv.termsEnum(); + weights[sub] = 
dv.getValueCount(); + } + + // step 2: create ordinal map for this dimension + OrdinalMap map = OrdinalMap.build(null, liveTerms, weights, PackedInts.COMPACT); + dimensionToOrdinalMap.put(dimName, map); + + logger.debug("Ordinal map for dimension {} - Size in bytes: {}", dimName, map.ramBytesUsed()); + } + this.mergeSortedSetDimensionsOrdinalMap = dimensionToOrdinalMap; + logger.debug("Total time to build ordinal maps: {} ms", System.currentTimeMillis() - curr); + return dimensionToOrdinalMap; + } + /** * Builds the star tree using sorted and aggregated star-tree Documents * @@ -295,6 +392,9 @@ void appendDocumentsToStarTree(Iterator starTreeDocumentIterat } } + /** + * Writes star tree structure to file format + */ private void serializeStarTree(int numSegmentStarTreeDocuments, int numStarTreeDocs) throws IOException { // serialize the star tree data long dataFilePointer = dataOut.getFilePointer(); @@ -314,10 +414,13 @@ private void serializeStarTree(int numSegmentStarTreeDocuments, int numStarTreeD ); } + /** + * Creates the star-tree docValues indices in disk + */ private void createSortedDocValuesIndices(DocValuesConsumer docValuesConsumer, AtomicInteger fieldNumberAcrossStarTrees) throws IOException { - List dimensionWriters = new ArrayList<>(); - List metricWriters = new ArrayList<>(); + List> dimensionWriters = new ArrayList<>(); + List> metricWriters = new ArrayList<>(); FieldInfo[] dimensionFieldInfoList = new FieldInfo[numDimensions]; FieldInfo[] metricFieldInfoList = new FieldInfo[metricAggregatorInfos.size()]; int dimIndex = 0; @@ -325,16 +428,21 @@ private void createSortedDocValuesIndices(DocValuesConsumer docValuesConsumer, A for (String name : dim.getSubDimensionNames()) { final FieldInfo fi = getFieldInfo( fullyQualifiedFieldNameForStarTreeDimensionsDocValues(starTreeField.getName(), name), - DocValuesType.SORTED_NUMERIC, + dim.getDocValuesType(), fieldNumberAcrossStarTrees.getAndIncrement() ); dimensionFieldInfoList[dimIndex] = fi; - dimensionWriters.add(new SortedNumericDocValuesWriterWrapper(fi, Counter.newCounter())); + if (dim.getDocValuesType().equals(DocValuesType.SORTED_SET)) { + ByteBlockPool.DirectTrackingAllocator byteBlockAllocator = new ByteBlockPool.DirectTrackingAllocator(bytesUsed); + ByteBlockPool docValuesBytePool = new ByteBlockPool(byteBlockAllocator); + dimensionWriters.add(new SortedSetDocValuesWriterWrapper(fi, bytesUsed, docValuesBytePool)); + } else { + dimensionWriters.add(new SortedNumericDocValuesWriterWrapper(fi, bytesUsed)); + } dimIndex++; } } for (int i = 0; i < metricAggregatorInfos.size(); i++) { - final FieldInfo fi = getFieldInfo( fullyQualifiedFieldNameForStarTreeMetricsDocValues( starTreeField.getName(), @@ -344,16 +452,18 @@ private void createSortedDocValuesIndices(DocValuesConsumer docValuesConsumer, A DocValuesType.SORTED_NUMERIC, fieldNumberAcrossStarTrees.getAndIncrement() ); - metricFieldInfoList[i] = fi; - metricWriters.add(new SortedNumericDocValuesWriterWrapper(fi, Counter.newCounter())); + metricWriters.add(new SortedNumericDocValuesWriterWrapper(fi, bytesUsed)); } - for (int docId = 0; docId < numStarTreeDocs; docId++) { StarTreeDocument starTreeDocument = getStarTreeDocument(docId); - for (int i = 0; i < starTreeDocument.dimensions.length; i++) { - if (starTreeDocument.dimensions[i] != null) { - dimensionWriters.get(i).addValue(docId, starTreeDocument.dimensions[i]); + int idx = 0; + for (Dimension dim : dimensionsSplitOrder) { + for (String name : dim.getSubDimensionNames()) { + if (starTreeDocument.dimensions[idx] != 
null) { + indexDocValue(dimensionWriters.get(idx), docId, starTreeDocument.dimensions[idx], dim.getField()); + } + idx++; } } @@ -362,11 +472,17 @@ private void createSortedDocValuesIndices(DocValuesConsumer docValuesConsumer, A FieldValueConverter aggregatedValueType = metricAggregatorInfos.get(i).getValueAggregators().getAggregatedValueType(); if (aggregatedValueType.equals(LONG)) { if (starTreeDocument.metrics[i] != null) { - metricWriters.get(i).addValue(docId, (long) starTreeDocument.metrics[i]); + ((SortedNumericDocValuesWriterWrapper) (metricWriters.get(i))).addValue( + docId, + (long) starTreeDocument.metrics[i] + ); } } else if (aggregatedValueType.equals(DOUBLE)) { if (starTreeDocument.metrics[i] != null) { - metricWriters.get(i).addValue(docId, NumericUtils.doubleToSortableLong((Double) starTreeDocument.metrics[i])); + ((SortedNumericDocValuesWriterWrapper) (metricWriters.get(i))).addValue( + docId, + NumericUtils.doubleToSortableLong((Double) starTreeDocument.metrics[i]) + ); } } else { throw new IllegalStateException("Unknown metric doc value type"); @@ -376,26 +492,68 @@ private void createSortedDocValuesIndices(DocValuesConsumer docValuesConsumer, A } } } - addStarTreeDocValueFields(docValuesConsumer, dimensionWriters, dimensionFieldInfoList, numDimensions); addStarTreeDocValueFields(docValuesConsumer, metricWriters, metricFieldInfoList, metricAggregatorInfos.size()); } + /** + * Adds startree field to respective field writers + */ + private void indexDocValue(DocValuesWriterWrapper dvWriter, int docId, long value, String field) throws IOException { + if (dvWriter instanceof SortedSetDocValuesWriterWrapper) { + // TODO : cache lookupOrd to make it faster + if (isMerge) { + OrdinalMap map = mergeSortedSetDimensionsOrdinalMap.get(field); + int segmentNumber = map.getFirstSegmentNumber(value); + long segmentOrd = map.getFirstSegmentOrd(value); + ((SortedSetDocValuesWriterWrapper) dvWriter).addValue( + docId, + mergeSortedSetDimensionsMap.get(field).get(segmentNumber).lookupOrd(segmentOrd) + ); + } else { + ((SortedSetDocValuesWriterWrapper) dvWriter).addValue(docId, flushSortedSetDocValuesMap.get(field).lookupOrd(value)); + } + } else if (dvWriter instanceof SortedNumericDocValuesWriterWrapper) { + ((SortedNumericDocValuesWriterWrapper) dvWriter).addValue(docId, value); + } + } + + @SuppressWarnings("unchecked") private void addStarTreeDocValueFields( DocValuesConsumer docValuesConsumer, - List docValuesWriters, + List> docValuesWriters, FieldInfo[] fieldInfoList, int fieldCount ) throws IOException { for (int i = 0; i < fieldCount; i++) { final int writerIndex = i; - DocValuesProducer docValuesProducer = new EmptyDocValuesProducer() { - @Override - public SortedNumericDocValues getSortedNumeric(FieldInfo field) { - return docValuesWriters.get(writerIndex).getDocValues(); - } - }; - docValuesConsumer.addSortedNumericField(fieldInfoList[i], docValuesProducer); + DocValuesProducer docValuesProducer; + switch (fieldInfoList[i].getDocValuesType()) { + case SORTED_NUMERIC: + docValuesProducer = new EmptyDocValuesProducer() { + @Override + public SortedNumericDocValues getSortedNumeric(FieldInfo field) { + DocValuesWriterWrapper wrapper = (DocValuesWriterWrapper< + SortedNumericDocValues>) docValuesWriters.get(writerIndex); + return wrapper.getDocValues(); + } + }; + docValuesConsumer.addSortedNumericField(fieldInfoList[i], docValuesProducer); + break; + case SORTED_SET: + docValuesProducer = new EmptyDocValuesProducer() { + @Override + public SortedSetDocValues 
getSortedSet(FieldInfo field) { + DocValuesWriterWrapper wrapper = (DocValuesWriterWrapper< + SortedSetDocValues>) docValuesWriters.get(writerIndex); + return wrapper.getDocValues(); + } + }; + docValuesConsumer.addSortedSetField(fieldInfoList[i], docValuesProducer); + break; + default: + throw new IllegalStateException("Unsupported doc values type"); + } } } @@ -405,13 +563,14 @@ public SortedNumericDocValues getSortedNumeric(FieldInfo field) { protected StarTreeDocument getStarTreeDocument( int currentDocId, SequentialDocValuesIterator[] dimensionReaders, - List metricReaders + List metricReaders, + Map longValues ) throws IOException { Long[] dims = new Long[numDimensions]; int i = 0; for (SequentialDocValuesIterator dimensionValueIterator : dimensionReaders) { dimensionValueIterator.nextEntry(currentDocId); - Long val = dimensionValueIterator.value(currentDocId); + Long val = dimensionValueIterator.value(currentDocId, longValues.get(starTreeField.getDimensionNames().get(i))); dims[i] = val; i++; } @@ -431,7 +590,7 @@ protected StarTreeDocument getStarTreeDocument( /** * Sets dimensions / metric readers nnd numSegmentDocs */ - protected void setReadersAndNumSegmentDocs( + protected void setReadersAndNumSegmentDocsDuringMerge( SequentialDocValuesIterator[] dimensionReaders, List metricReaders, AtomicInteger numSegmentDocs, @@ -452,7 +611,6 @@ protected void setReadersAndNumSegmentDocs( metricReaders.add(new SequentialDocValuesIterator(starTreeValues.getMetricValuesIterator(metricFullName))); } } - numSegmentDocs.set( Integer.parseInt(starTreeValues.getAttributes().getOrDefault(SEGMENT_DOCS_COUNT, String.valueOf(DocIdSetIterator.NO_MORE_DOCS))) ); @@ -669,6 +827,14 @@ private static Long getLong(Object metric) { return metricValue; } + /** + * Sets the sortedSetDocValuesMap. + * This is needed as we need to write the ordinals and also the bytesRef associated with it + */ + void setFlushSortedSetDocValuesMap(Map flushSortedSetDocValuesMap) { + this.flushSortedSetDocValuesMap = flushSortedSetDocValuesMap; + } + /** * Merges a star-tree document into an aggregated star-tree document. * A new aggregated star-tree document is created if the aggregated document is null. 
@@ -799,7 +965,6 @@ private void constructStarTree(InMemoryTreeNode node, int startDocId, int endDoc constructStarTree(child, child.getStartDocId(), child.getEndDocId()); } } - } /** @@ -837,7 +1002,6 @@ private void addChildNode(InMemoryTreeNode node, int endDocId, int dimensionId, childNodeDimensionValue = nodeDimensionValue; childNodeType = StarTreeNodeType.DEFAULT.getValue(); } - InMemoryTreeNode lastNode = getNewNode(dimensionId, nodeStartDocId, endDocId, childNodeType, childNodeDimensionValue); node.addChildNode(lastNode, nodeDimensionValue); } diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/OffHeapStarTreeBuilder.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/OffHeapStarTreeBuilder.java index 09d92e3da29c3..63659ef684744 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/OffHeapStarTreeBuilder.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/OffHeapStarTreeBuilder.java @@ -11,8 +11,10 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.codecs.DocValuesConsumer; +import org.apache.lucene.index.OrdinalMap; import org.apache.lucene.index.SegmentWriteState; import org.apache.lucene.store.IndexOutput; +import org.apache.lucene.util.LongValues; import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.common.util.io.IOUtils; import org.opensearch.index.compositeindex.datacube.startree.StarTreeDocument; @@ -28,7 +30,9 @@ import java.util.Arrays; import java.util.Collections; import java.util.Iterator; +import java.util.LinkedHashMap; import java.util.List; +import java.util.Map; import java.util.Objects; import java.util.concurrent.atomic.AtomicInteger; @@ -177,19 +181,27 @@ private Object[] getStarTreeMetricFieldValuesFromSegment(int currentDocId, List< Iterator mergeStarTrees(List starTreeValuesSubs) throws IOException { int numDocs = 0; int[] docIds; + this.isMerge = true; + Map ordinalMaps = getOrdinalMaps(starTreeValuesSubs); try { + int seg = 0; for (StarTreeValues starTreeValues : starTreeValuesSubs) { SequentialDocValuesIterator[] dimensionReaders = new SequentialDocValuesIterator[numDimensions]; List metricReaders = new ArrayList<>(); AtomicInteger numSegmentDocs = new AtomicInteger(); - setReadersAndNumSegmentDocs(dimensionReaders, metricReaders, numSegmentDocs, starTreeValues); + setReadersAndNumSegmentDocsDuringMerge(dimensionReaders, metricReaders, numSegmentDocs, starTreeValues); int currentDocId = 0; + Map longValuesMap = new LinkedHashMap<>(); + for (Map.Entry entry : ordinalMaps.entrySet()) { + longValuesMap.put(entry.getKey(), entry.getValue().getGlobalOrds(seg)); + } while (currentDocId < numSegmentDocs.get()) { - StarTreeDocument starTreeDocument = getStarTreeDocument(currentDocId, dimensionReaders, metricReaders); + StarTreeDocument starTreeDocument = getStarTreeDocument(currentDocId, dimensionReaders, metricReaders, longValuesMap); segmentDocumentFileManager.writeStarTreeDocument(starTreeDocument, true); numDocs++; currentDocId++; } + seg++; } docIds = new int[numDocs]; for (int i = 0; i < numDocs; i++) { diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/OnHeapStarTreeBuilder.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/OnHeapStarTreeBuilder.java index 07142fc5c8be7..c91f4c5db98bb 100644 --- 
a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/OnHeapStarTreeBuilder.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/OnHeapStarTreeBuilder.java @@ -8,8 +8,10 @@ package org.opensearch.index.compositeindex.datacube.startree.builder; import org.apache.lucene.codecs.DocValuesConsumer; +import org.apache.lucene.index.OrdinalMap; import org.apache.lucene.index.SegmentWriteState; import org.apache.lucene.store.IndexOutput; +import org.apache.lucene.util.LongValues; import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.index.compositeindex.datacube.startree.StarTreeDocument; import org.opensearch.index.compositeindex.datacube.startree.StarTreeField; @@ -21,7 +23,9 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Iterator; +import java.util.LinkedHashMap; import java.util.List; +import java.util.Map; import java.util.Objects; import java.util.concurrent.atomic.AtomicInteger; @@ -113,6 +117,7 @@ public void build( */ @Override Iterator mergeStarTrees(List starTreeValuesSubs) throws IOException { + this.isMerge = true; return sortAndAggregateStarTreeDocuments(getSegmentsStarTreeDocuments(starTreeValuesSubs), true); } @@ -125,17 +130,23 @@ Iterator mergeStarTrees(List starTreeValuesSub */ StarTreeDocument[] getSegmentsStarTreeDocuments(List starTreeValuesSubs) throws IOException { List starTreeDocuments = new ArrayList<>(); + Map ordinalMaps = getOrdinalMaps(starTreeValuesSubs); + int seg = 0; for (StarTreeValues starTreeValues : starTreeValuesSubs) { - SequentialDocValuesIterator[] dimensionReaders = new SequentialDocValuesIterator[numDimensions]; List metricReaders = new ArrayList<>(); AtomicInteger numSegmentDocs = new AtomicInteger(); - setReadersAndNumSegmentDocs(dimensionReaders, metricReaders, numSegmentDocs, starTreeValues); + setReadersAndNumSegmentDocsDuringMerge(dimensionReaders, metricReaders, numSegmentDocs, starTreeValues); int currentDocId = 0; + Map longValuesMap = new LinkedHashMap<>(); + for (Map.Entry entry : ordinalMaps.entrySet()) { + longValuesMap.put(entry.getKey(), entry.getValue().getGlobalOrds(seg)); + } while (currentDocId < numSegmentDocs.get()) { - starTreeDocuments.add(getStarTreeDocument(currentDocId, dimensionReaders, metricReaders)); + starTreeDocuments.add(getStarTreeDocument(currentDocId, dimensionReaders, metricReaders, longValuesMap)); currentDocId++; } + seg++; } StarTreeDocument[] starTreeDocumentsArr = new StarTreeDocument[starTreeDocuments.size()]; return starTreeDocuments.toArray(starTreeDocumentsArr); diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilder.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilder.java index 23415ddf29132..038164c9c842d 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilder.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilder.java @@ -47,6 +47,7 @@ void build( * @param starTreeValuesSubs contains the star tree values from multiple segments * @param fieldNumberAcrossStarTrees maintains the unique field number across the fields in the star tree * @param starTreeDocValuesConsumer consumer of star-tree doc values + * * @throws IOException when we are unable to build star-tree */ void build( diff --git 
a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeDocsFileManager.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeDocsFileManager.java index 7e920b912731d..98c3e5c6d71e6 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeDocsFileManager.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeDocsFileManager.java @@ -14,6 +14,7 @@ import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.IndexOutput; import org.apache.lucene.store.RandomAccessInput; +import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.common.util.io.IOUtils; import org.opensearch.index.compositeindex.datacube.startree.StarTreeDocument; import org.opensearch.index.compositeindex.datacube.startree.StarTreeField; @@ -45,7 +46,9 @@ *
<p>
The set of 'star-tree.documents' files is maintained, and a tracker array is used to keep track of the start document ID for each file. * Once the number of files reaches a set threshold, the files are merged. * + * @opensearch.experimental */ +@ExperimentalApi public class StarTreeDocsFileManager extends AbstractDocumentsFileManager implements Closeable { private static final Logger logger = LogManager.getLogger(StarTreeDocsFileManager.class); private static final String STAR_TREE_DOC_FILE_NAME = "star-tree.documents"; diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreesBuilder.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreesBuilder.java index bc598c9aeab7c..3d1a780c1c7ef 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreesBuilder.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreesBuilder.java @@ -106,10 +106,10 @@ public void close() throws IOException { /** * Merges star tree fields from multiple segments * - * @param metaOut an IndexInput for star-tree metadata - * @param dataOut an IndexInput for star-tree data - * @param starTreeValuesSubsPerField starTreeValuesSubs per field - * @param starTreeDocValuesConsumer a consumer to write star-tree doc values + * @param metaOut an IndexInput for star-tree metadata + * @param dataOut an IndexInput for star-tree data + * @param starTreeValuesSubsPerField starTreeValuesSubs per field + * @param starTreeDocValuesConsumer a consumer to write star-tree doc values */ public void buildDuringMerge( IndexOutput metaOut, diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/fileformats/meta/StarTreeMetadata.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/fileformats/meta/StarTreeMetadata.java index 7352c215ee390..57e47b1a5b9d9 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/fileformats/meta/StarTreeMetadata.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/fileformats/meta/StarTreeMetadata.java @@ -10,6 +10,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.index.CorruptIndexException; +import org.apache.lucene.index.DocValuesType; import org.apache.lucene.store.IndexInput; import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.index.compositeindex.CompositeIndexMetadata; @@ -62,9 +63,10 @@ public class StarTreeMetadata extends CompositeIndexMetadata { private final String starTreeFieldType; /** - * List of dimension fields used in the star-tree. + * Map of dimension fields to their associated DocValuesType.Insertion order needs to be maintained + * as it dictates dimensionSplitOrder */ - private final List dimensionFields; + LinkedHashMap dimensionFieldsToDocValuesMap; /** * List of metrics, containing field names and associated metric statistics. 
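[Editorial note on the hunk above: the patch replaces the ordered List of dimension field names in StarTreeMetadata with a LinkedHashMap keyed by field name and valued by DocValuesType, because the map's insertion order doubles as the dimension split order. The following standalone sketch is not OpenSearch code; DimensionValueType and the java.io streams are hypothetical stand-ins for Lucene's DocValuesType and IndexInput/IndexOutput. It only illustrates, under those assumptions, the round-trip of writing each dimension name followed by a one-byte type tag and reading it back into an insertion-ordered map.]

// Hypothetical sketch: round-tripping an insertion-ordered map of dimension
// fields to a per-dimension value-type tag, mirroring the LinkedHashMap
// approach introduced in this hunk. Stand-ins only, not Lucene/OpenSearch APIs.
import java.io.*;
import java.util.LinkedHashMap;
import java.util.Map;

public class DimensionMapRoundTrip {

    // Stand-in for org.apache.lucene.index.DocValuesType.
    enum DimensionValueType { SORTED_NUMERIC, SORTED_SET }

    static void write(DataOutput out, LinkedHashMap<String, DimensionValueType> dims) throws IOException {
        out.writeInt(dims.size());
        for (Map.Entry<String, DimensionValueType> e : dims.entrySet()) {
            out.writeUTF(e.getKey());                      // dimension field name
            out.writeByte((byte) e.getValue().ordinal());  // one-byte type tag
        }
    }

    static LinkedHashMap<String, DimensionValueType> read(DataInput in) throws IOException {
        int count = in.readInt();
        // LinkedHashMap preserves the order the dimensions were written in,
        // which is what dictates the dimension split order in the patch.
        LinkedHashMap<String, DimensionValueType> dims = new LinkedHashMap<>();
        for (int i = 0; i < count; i++) {
            String name = in.readUTF();
            dims.put(name, DimensionValueType.values()[in.readByte()]);
        }
        return dims;
    }

    public static void main(String[] args) throws IOException {
        LinkedHashMap<String, DimensionValueType> dims = new LinkedHashMap<>();
        dims.put("keyword1", DimensionValueType.SORTED_SET);
        dims.put("sndv", DimensionValueType.SORTED_NUMERIC);

        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        write(new DataOutputStream(bytes), dims);
        LinkedHashMap<String, DimensionValueType> back =
            read(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));

        System.out.println(back); // {keyword1=SORTED_SET, sndv=SORTED_NUMERIC}
    }
}

[A compact byte tag per dimension, in the spirit of the patch's docValuesByte/getDocValuesType switches below, keeps the metadata small while still letting the reader recreate the appropriate kind of doc-values iterator.]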
@@ -128,7 +130,7 @@ public StarTreeMetadata( this.starTreeFieldType = this.getCompositeFieldType().getName(); this.version = version; this.numberOfNodes = readNumberOfNodes(); - this.dimensionFields = readStarTreeDimensions(); + this.dimensionFieldsToDocValuesMap = readStarTreeDimensions(); this.metrics = readMetricEntries(); this.segmentAggregatedDocCount = readSegmentAggregatedDocCount(); this.starTreeDocCount = readStarTreeDocCount(); @@ -151,7 +153,7 @@ public StarTreeMetadata( * @param compositeFieldName name of the composite field. Here, name of the star-tree field. * @param compositeFieldType type of the composite field. Here, STAR_TREE field. * @param version The version of the star tree stored in the segments. - * @param dimensionFields list of dimension fields + * @param dimensionFieldsToDocValuesMap map of dimensionFields to docValues * @param metrics list of metric entries * @param segmentAggregatedDocCount segment aggregated doc count * @param starTreeDocCount the total number of star tree documents for the segment @@ -167,7 +169,7 @@ public StarTreeMetadata( IndexInput meta, Integer version, Integer numberOfNodes, - List dimensionFields, + LinkedHashMap dimensionFieldsToDocValuesMap, List metrics, Integer segmentAggregatedDocCount, Integer starTreeDocCount, @@ -183,7 +185,7 @@ public StarTreeMetadata( this.starTreeFieldType = compositeFieldType.getName(); this.version = version; this.numberOfNodes = numberOfNodes; - this.dimensionFields = dimensionFields; + this.dimensionFieldsToDocValuesMap = dimensionFieldsToDocValuesMap; this.metrics = metrics; this.segmentAggregatedDocCount = segmentAggregatedDocCount; this.starTreeDocCount = starTreeDocCount; @@ -202,15 +204,14 @@ private int readDimensionsCount() throws IOException { return meta.readVInt(); } - private List readStarTreeDimensions() throws IOException { + private LinkedHashMap readStarTreeDimensions() throws IOException { int dimensionCount = readDimensionsCount(); - List dimensionFields = new ArrayList<>(); + LinkedHashMap dimensionFieldsToDocValuesMap = new LinkedHashMap<>(); for (int i = 0; i < dimensionCount; i++) { - dimensionFields.add(meta.readString()); + dimensionFieldsToDocValuesMap.put(meta.readString(), getDocValuesType(meta, meta.readByte())); } - - return dimensionFields; + return dimensionFieldsToDocValuesMap; } private int readMetricsCount() throws IOException { @@ -314,8 +315,8 @@ public String getStarTreeFieldType() { * * @return star-tree dimension field numbers */ - public List getDimensionFields() { - return dimensionFields; + public Map getDimensionFields() { + return dimensionFieldsToDocValuesMap; } /** @@ -405,4 +406,23 @@ public int getVersion() { public int getNumberOfNodes() { return numberOfNodes; } + + private static DocValuesType getDocValuesType(IndexInput input, byte b) throws IOException { + switch (b) { + case 0: + return DocValuesType.NONE; + case 1: + return DocValuesType.NUMERIC; + case 2: + return DocValuesType.BINARY; + case 3: + return DocValuesType.SORTED; + case 4: + return DocValuesType.SORTED_SET; + case 5: + return DocValuesType.SORTED_NUMERIC; + default: + throw new CorruptIndexException("invalid docvalues byte: " + b, input); + } + } } diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/fileformats/meta/StarTreeMetadataWriter.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/fileformats/meta/StarTreeMetadataWriter.java index 42e6f3c59866a..569692ce18893 100644 --- 
a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/fileformats/meta/StarTreeMetadataWriter.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/fileformats/meta/StarTreeMetadataWriter.java @@ -10,6 +10,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.apache.lucene.index.DocValuesType; import org.apache.lucene.store.IndexOutput; import org.opensearch.index.compositeindex.datacube.startree.StarTreeField; import org.opensearch.index.compositeindex.datacube.startree.aggregators.MetricAggregatorInfo; @@ -130,8 +131,9 @@ private static void writeMeta( metaOut.writeVInt(starTreeField.getDimensionNames().size()); // dimensions - for (String dim : starTreeField.getDimensionNames()) { - metaOut.writeString(dim); + for (int i = 0; i < starTreeField.getDimensionNames().size(); i++) { + metaOut.writeString(starTreeField.getDimensionNames().get(i)); + metaOut.writeByte(docValuesByte(starTreeField.getDimensionDocValueTypes().get(i))); } // number of metrics @@ -171,4 +173,24 @@ private static void writeMeta( metaOut.writeVLong(dataFileLength); } + + private static byte docValuesByte(DocValuesType type) { + switch (type) { + case NONE: + return 0; + case NUMERIC: + return 1; + case BINARY: + return 2; + case SORTED: + return 3; + case SORTED_SET: + return 4; + case SORTED_NUMERIC: + return 5; + default: + // BUG + throw new AssertionError("unhandled DocValuesType: " + type); + } + } } diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/index/StarTreeValues.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/index/StarTreeValues.java index 003ebeafeae45..6a13e6e789f3a 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/index/StarTreeValues.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/index/StarTreeValues.java @@ -9,9 +9,12 @@ package org.opensearch.index.compositeindex.datacube.startree.index; import org.apache.lucene.codecs.DocValuesProducer; +import org.apache.lucene.index.DocValues; +import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.SegmentReadState; import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.store.IndexInput; import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.index.compositeindex.CompositeIndexMetadata; @@ -25,6 +28,7 @@ import org.opensearch.index.compositeindex.datacube.startree.node.StarTreeFactory; import org.opensearch.index.compositeindex.datacube.startree.node.StarTreeNode; import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.SortedNumericStarTreeValuesIterator; +import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.SortedSetStarTreeValuesIterator; import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.StarTreeValuesIterator; import java.io.IOException; @@ -35,7 +39,6 @@ import java.util.Set; import java.util.function.Supplier; -import static org.opensearch.index.codec.composite.composite912.Composite912DocValuesReader.getSortedNumericDocValues; import static org.opensearch.index.compositeindex.CompositeIndexConstants.SEGMENT_DOCS_COUNT; import static org.opensearch.index.compositeindex.CompositeIndexConstants.STAR_TREE_DOCS_COUNT; import static 
org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeUtils.fullyQualifiedFieldNameForStarTreeDimensionsDocValues; @@ -128,8 +131,15 @@ public StarTreeValues( // build dimensions List readDimensions = new ArrayList<>(); - for (String dimension : starTreeMetadata.getDimensionFields()) { - readDimensions.add(new ReadDimension(dimension)); + for (String dimension : starTreeMetadata.getDimensionFields().keySet()) { + readDimensions.add( + new ReadDimension( + dimension, + readState.fieldInfos.fieldInfo( + fullyQualifiedFieldNameForStarTreeDimensionsDocValues(starTreeMetadata.getCompositeFieldName(), dimension) + ).getDocValuesType() + ) + ); } // star-tree field @@ -151,19 +161,25 @@ public StarTreeValues( metricValuesIteratorMap = new LinkedHashMap<>(); // get doc id set iterators for dimensions - for (String dimension : starTreeMetadata.getDimensionFields()) { + for (String dimension : starTreeMetadata.getDimensionFields().keySet()) { dimensionValuesIteratorMap.put(dimension, () -> { try { - SortedNumericDocValues dimensionSortedNumericDocValues = null; + FieldInfo dimensionfieldInfo = null; if (readState != null) { - FieldInfo dimensionfieldInfo = readState.fieldInfos.fieldInfo( + dimensionfieldInfo = readState.fieldInfos.fieldInfo( fullyQualifiedFieldNameForStarTreeDimensionsDocValues(starTreeField.getName(), dimension) ); - if (dimensionfieldInfo != null) { - dimensionSortedNumericDocValues = compositeDocValuesProducer.getSortedNumeric(dimensionfieldInfo); - } } - return new SortedNumericStarTreeValuesIterator(getSortedNumericDocValues(dimensionSortedNumericDocValues)); + assert dimensionfieldInfo != null; + if (dimensionfieldInfo.getDocValuesType().equals(DocValuesType.SORTED_SET)) { + SortedSetDocValues dimensionSortedSetDocValues = compositeDocValuesProducer.getSortedSet(dimensionfieldInfo); + return new SortedSetStarTreeValuesIterator(getSortedSetDocValues(dimensionSortedSetDocValues)); + } else { + SortedNumericDocValues dimensionSortedNumericDocValues = compositeDocValuesProducer.getSortedNumeric( + dimensionfieldInfo + ); + return new SortedNumericStarTreeValuesIterator(getSortedNumericDocValues(dimensionSortedNumericDocValues)); + } } catch (IOException e) { throw new RuntimeException("Error loading dimension StarTreeValuesIterator", e); } @@ -272,4 +288,30 @@ public StarTreeValuesIterator getMetricValuesIterator(String fullyQualifiedMetri public int getStarTreeDocumentCount() { return starTreeMetadata.getStarTreeDocCount(); } + + /** + * Returns the sorted numeric doc values for the given sorted numeric field. + * If the sorted numeric field is null, it returns an empty doc id set iterator. + *
<p>
+ * Sorted numeric field can be null for cases where the segment doesn't hold a particular value. + * + * @param sortedNumeric the sorted numeric doc values for a field + * @return empty sorted numeric values if the field is not present, else sortedNumeric + */ + static SortedNumericDocValues getSortedNumericDocValues(SortedNumericDocValues sortedNumeric) { + return sortedNumeric == null ? DocValues.emptySortedNumeric() : sortedNumeric; + } + + /** + * Returns the sortedSet doc values for the given sortedSet field. + * If the sortedSet field is null, it returns an empty doc id set iterator. + *
<p>
+ * SortedSet field can be null for cases where the segment doesn't hold a particular value. + * + * @param sortedSetDv the sortedSet doc values for a field + * @return empty sortedSet values if the field is not present, else sortedSetDv + */ + static SortedSetDocValues getSortedSetDocValues(SortedSetDocValues sortedSetDv) { + return sortedSetDv == null ? DocValues.emptySortedSet() : sortedSetDv; + } } diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/SequentialDocValuesIterator.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/SequentialDocValuesIterator.java index 9029a451ca4d9..c4d3526648cac 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/SequentialDocValuesIterator.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/SequentialDocValuesIterator.java @@ -9,8 +9,10 @@ package org.opensearch.index.compositeindex.datacube.startree.utils; +import org.apache.lucene.util.LongValues; import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.SortedNumericStarTreeValuesIterator; +import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.SortedSetStarTreeValuesIterator; import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.StarTreeValuesIterator; import java.io.IOException; @@ -81,6 +83,33 @@ public Long value(int currentEntryId) throws IOException { } return ((SortedNumericStarTreeValuesIterator) starTreeValuesIterator).nextValue(); + } else if (starTreeValuesIterator instanceof SortedSetStarTreeValuesIterator) { + if (currentEntryId < 0) { + throw new IllegalStateException("invalid entry id to fetch the next value"); + } + if (currentEntryId == StarTreeValuesIterator.NO_MORE_ENTRIES) { + throw new IllegalStateException("StarTreeValuesIterator is already exhausted"); + } + if (entryId == StarTreeValuesIterator.NO_MORE_ENTRIES || entryId != currentEntryId) { + return null; + } + return ((SortedSetStarTreeValuesIterator) starTreeValuesIterator).nextOrd(); + } else { + throw new IllegalStateException("Unsupported Iterator requested for SequentialDocValuesIterator"); + } + } + + public Long value(int currentEntryId, LongValues globalOrdinalLongValues) throws IOException { + if (starTreeValuesIterator instanceof SortedNumericStarTreeValuesIterator) { + return value(currentEntryId); + } else if (starTreeValuesIterator instanceof SortedSetStarTreeValuesIterator) { + assert globalOrdinalLongValues != null; + Long val = value(currentEntryId); + // convert local ordinal to global ordinal + if (val != null) { + val = globalOrdinalLongValues.get(val); + } + return val; } else { throw new IllegalStateException("Unsupported Iterator requested for SequentialDocValuesIterator"); } diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeUtils.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeUtils.java index 2aae0d4ca7e29..240a727678d6f 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeUtils.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeUtils.java @@ -16,6 +16,7 @@ import java.util.Collections; import java.util.List; +import java.util.Map; /** * Util class for building star tree @@ -67,14 +68,17 @@ public static String 
fullyQualifiedFieldNameForStarTreeMetricsDocValues(String s * @param fields field names * @return field infos */ - public static FieldInfo[] getFieldInfoList(List fields) { + public static FieldInfo[] getFieldInfoList(List fields, Map dimDocValuesTypeMap) { FieldInfo[] fieldInfoList = new FieldInfo[fields.size()]; - // field number is not really used. We depend on unique field names to get the desired iterator int fieldNumber = 0; - for (String fieldName : fields) { - fieldInfoList[fieldNumber] = getFieldInfo(fieldName, DocValuesType.SORTED_NUMERIC, fieldNumber); + fieldInfoList[fieldNumber] = getFieldInfo( + fieldName, + // default is sortedNumeric since all metrics right now are sorted numeric + dimDocValuesTypeMap.getOrDefault(fieldName, DocValuesType.SORTED_NUMERIC), + fieldNumber + ); fieldNumber++; } return fieldInfoList; diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/iterator/SortedSetStarTreeValuesIterator.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/iterator/SortedSetStarTreeValuesIterator.java new file mode 100644 index 0000000000000..0cddffe5877e9 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/iterator/SortedSetStarTreeValuesIterator.java @@ -0,0 +1,59 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.compositeindex.datacube.startree.utils.iterator; + +import org.apache.lucene.index.SortedSetDocValues; +import org.apache.lucene.index.TermsEnum; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.automaton.CompiledAutomaton; +import org.opensearch.common.annotation.ExperimentalApi; + +import java.io.IOException; + +/** + * Wrapper iterator class for StarTree index to traverse through SortedNumericDocValues + * + * @opensearch.experimental + */ +@ExperimentalApi +public class SortedSetStarTreeValuesIterator extends StarTreeValuesIterator { + + public SortedSetStarTreeValuesIterator(DocIdSetIterator docIdSetIterator) { + super(docIdSetIterator); + } + + public long nextOrd() throws IOException { + return ((SortedSetDocValues) docIdSetIterator).nextOrd(); + } + + public int docValueCount() { + return ((SortedSetDocValues) docIdSetIterator).docValueCount(); + } + + public BytesRef lookupOrd(long ord) throws IOException { + return ((SortedSetDocValues) docIdSetIterator).lookupOrd(ord); + } + + public long getValueCount() { + return ((SortedSetDocValues) docIdSetIterator).getValueCount(); + } + + public long lookupTerm(BytesRef key) throws IOException { + return ((SortedSetDocValues) docIdSetIterator).lookupTerm(key); + } + + public TermsEnum termsEnum() throws IOException { + return ((SortedSetDocValues) docIdSetIterator).termsEnum(); + } + + public TermsEnum intersect(CompiledAutomaton automaton) throws IOException { + return ((SortedSetDocValues) docIdSetIterator).intersect(automaton); + } +} diff --git a/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java index 54a1aead5fcc7..df14a5811f6a0 100644 --- a/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java @@ -59,6 +59,7 @@ import 
org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.analysis.IndexAnalyzers; import org.opensearch.index.analysis.NamedAnalyzer; +import org.opensearch.index.compositeindex.datacube.DimensionType; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.fielddata.plain.SortedSetOrdinalsIndexFieldData; import org.opensearch.index.query.QueryShardContext; @@ -73,6 +74,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.function.Supplier; import static org.opensearch.search.SearchService.ALLOW_EXPENSIVE_QUERIES; @@ -254,6 +256,11 @@ public KeywordFieldMapper build(BuilderContext context) { this ); } + + @Override + public Optional getSupportedDataCubeDimensionType() { + return Optional.of(DimensionType.KEYWORD); + } } public static final TypeParser PARSER = new TypeParser((n, c) -> new Builder(n, c.getIndexAnalyzers())); diff --git a/server/src/test/java/org/opensearch/index/codec/composite/SortedSetDocValuesWriterWrapperTests.java b/server/src/test/java/org/opensearch/index/codec/composite/SortedSetDocValuesWriterWrapperTests.java new file mode 100644 index 0000000000000..b0fdd712beafb --- /dev/null +++ b/server/src/test/java/org/opensearch/index/codec/composite/SortedSetDocValuesWriterWrapperTests.java @@ -0,0 +1,98 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec.composite; + +import org.apache.lucene.index.DocValuesType; +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.SortedSetDocValues; +import org.apache.lucene.index.SortedSetDocValuesWriterWrapper; +import org.apache.lucene.index.VectorEncoding; +import org.apache.lucene.index.VectorSimilarityFunction; +import org.apache.lucene.util.ByteBlockPool; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.Counter; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.Collections; + +public class SortedSetDocValuesWriterWrapperTests extends OpenSearchTestCase { + + private SortedSetDocValuesWriterWrapper wrapper; + private FieldInfo fieldInfo; + private Counter counter; + + @Override + public void setUp() throws Exception { + super.setUp(); + fieldInfo = new FieldInfo( + "field", + 1, + false, + false, + true, + IndexOptions.NONE, + DocValuesType.NONE, + -1, + Collections.emptyMap(), + 0, + 0, + 0, + 0, + VectorEncoding.FLOAT32, + VectorSimilarityFunction.EUCLIDEAN, + false, + false + ); + counter = Counter.newCounter(); + ByteBlockPool.DirectTrackingAllocator byteBlockAllocator = new ByteBlockPool.DirectTrackingAllocator(counter); + ByteBlockPool docValuesBytePool = new ByteBlockPool(byteBlockAllocator); + wrapper = new SortedSetDocValuesWriterWrapper(fieldInfo, counter, docValuesBytePool); + } + + public void testAddValue() throws IOException { + wrapper.addValue(0, new BytesRef("text1")); + wrapper.addValue(1, new BytesRef("text2")); + wrapper.addValue(2, new BytesRef("text3")); + + SortedSetDocValues docValues = wrapper.getDocValues(); + assertNotNull(docValues); + + assertEquals(0, docValues.nextDoc()); + assertEquals(0, docValues.nextOrd()); + assertEquals(1, docValues.nextDoc()); + assertEquals(1, docValues.nextOrd()); + assertEquals(2, docValues.nextDoc()); + assertEquals(2, 
docValues.nextOrd()); + } + + public void testGetDocValues() { + SortedSetDocValues docValues = wrapper.getDocValues(); + assertNotNull(docValues); + } + + public void testMultipleValues() throws IOException { + wrapper.addValue(0, new BytesRef("text1")); + wrapper.addValue(0, new BytesRef("text2")); + wrapper.addValue(1, new BytesRef("text3")); + + SortedSetDocValues docValues = wrapper.getDocValues(); + assertNotNull(docValues); + + assertEquals(0, docValues.nextDoc()); + assertEquals(0, docValues.nextOrd()); + assertEquals(1, docValues.nextOrd()); + assertEquals(-1, docValues.nextOrd()); + + assertEquals(1, docValues.nextDoc()); + assertEquals(2, docValues.nextOrd()); + assertEquals(-1, docValues.nextOrd()); + } +} diff --git a/server/src/test/java/org/opensearch/index/codec/composite912/datacube/startree/AbstractStarTreeDVFormatTests.java b/server/src/test/java/org/opensearch/index/codec/composite912/datacube/startree/AbstractStarTreeDVFormatTests.java new file mode 100644 index 0000000000000..4dfd8c08575f2 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/codec/composite912/datacube/startree/AbstractStarTreeDVFormatTests.java @@ -0,0 +1,126 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec.composite912.datacube.startree; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.codecs.Codec; +import org.apache.lucene.codecs.lucene912.Lucene912Codec; +import org.apache.lucene.tests.index.BaseDocValuesFormatTestCase; +import org.apache.lucene.tests.util.LuceneTestCase; +import org.opensearch.Version; +import org.opensearch.cluster.ClusterModule; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.CheckedConsumer; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.MapperTestUtils; +import org.opensearch.index.codec.composite.composite912.Composite912Codec; +import org.opensearch.index.compositeindex.datacube.startree.StarTreeFieldConfiguration; +import org.opensearch.index.compositeindex.datacube.startree.StarTreeIndexSettings; +import org.opensearch.index.mapper.MapperService; +import org.opensearch.indices.IndicesModule; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +import static org.opensearch.common.util.FeatureFlags.STAR_TREE_INDEX; + +/** + * Abstract star tree doc values Lucene tests + */ +@LuceneTestCase.SuppressSysoutChecks(bugUrl = "we log a lot on purpose") +public abstract class AbstractStarTreeDVFormatTests extends BaseDocValuesFormatTestCase { + MapperService mapperService = null; + StarTreeFieldConfiguration.StarTreeBuildMode buildMode; + + public AbstractStarTreeDVFormatTests(StarTreeFieldConfiguration.StarTreeBuildMode 
buildMode) { + this.buildMode = buildMode; + } + + @ParametersFactory + public static Collection parameters() { + List parameters = new ArrayList<>(); + parameters.add(new Object[] { StarTreeFieldConfiguration.StarTreeBuildMode.ON_HEAP }); + parameters.add(new Object[] { StarTreeFieldConfiguration.StarTreeBuildMode.OFF_HEAP }); + return parameters; + } + + @BeforeClass + public static void createMapper() throws Exception { + FeatureFlags.initializeFeatureFlags(Settings.builder().put(STAR_TREE_INDEX, "true").build()); + } + + @AfterClass + public static void clearMapper() { + FeatureFlags.initializeFeatureFlags(Settings.EMPTY); + } + + @After + public void teardown() throws IOException { + mapperService.close(); + } + + @Override + protected Codec getCodec() { + final Logger testLogger = LogManager.getLogger(StarTreeDocValuesFormatTests.class); + + try { + mapperService = createMapperService(getMapping()); + } catch (IOException e) { + throw new RuntimeException(e); + } + Codec codec = new Composite912Codec(Lucene912Codec.Mode.BEST_SPEED, mapperService, testLogger); + return codec; + } + + public static MapperService createMapperService(XContentBuilder builder) throws IOException { + Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey(), true) + .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(512, ByteSizeUnit.MB)) + .build(); + IndexMetadata indexMetadata = IndexMetadata.builder("test").settings(settings).putMapping(builder.toString()).build(); + IndicesModule indicesModule = new IndicesModule(Collections.emptyList()); + MapperService mapperService = MapperTestUtils.newMapperServiceWithHelperAnalyzer( + new NamedXContentRegistry(ClusterModule.getNamedXWriteables()), + createTempDir(), + settings, + indicesModule, + "test" + ); + mapperService.merge(indexMetadata, MapperService.MergeReason.INDEX_TEMPLATE); + return mapperService; + } + + abstract XContentBuilder getMapping() throws IOException; + + public static XContentBuilder topMapping(CheckedConsumer buildFields) throws IOException { + XContentBuilder builder = XContentFactory.jsonBuilder().startObject().startObject("_doc"); + buildFields.accept(builder); + return builder.endObject().endObject(); + } + +} diff --git a/server/src/test/java/org/opensearch/index/codec/composite912/datacube/startree/StarTreeDocValuesFormatTests.java b/server/src/test/java/org/opensearch/index/codec/composite912/datacube/startree/StarTreeDocValuesFormatTests.java index f081cadc1362c..03798c6e4ce55 100644 --- a/server/src/test/java/org/opensearch/index/codec/composite912/datacube/startree/StarTreeDocValuesFormatTests.java +++ b/server/src/test/java/org/opensearch/index/codec/composite912/datacube/startree/StarTreeDocValuesFormatTests.java @@ -8,14 +8,9 @@ package org.opensearch.index.codec.composite912.datacube.startree; -import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.apache.lucene.codecs.Codec; -import org.apache.lucene.codecs.lucene912.Lucene912Codec; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; +import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.document.SortedNumericDocValuesField; import 
org.apache.lucene.document.StringField; import org.apache.lucene.index.DirectoryReader; @@ -24,48 +19,25 @@ import org.apache.lucene.index.SegmentReader; import org.apache.lucene.index.Term; import org.apache.lucene.store.Directory; -import org.apache.lucene.tests.index.BaseDocValuesFormatTestCase; import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.tests.util.LuceneTestCase; import org.apache.lucene.tests.util.TestUtil; -import org.opensearch.Version; -import org.opensearch.cluster.ClusterModule; -import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.common.CheckedConsumer; import org.opensearch.common.lucene.Lucene; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.core.common.unit.ByteSizeUnit; -import org.opensearch.core.common.unit.ByteSizeValue; -import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.index.IndexSettings; -import org.opensearch.index.MapperTestUtils; import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; import org.opensearch.index.codec.composite.CompositeIndexReader; -import org.opensearch.index.codec.composite.composite912.Composite912Codec; import org.opensearch.index.compositeindex.datacube.startree.StarTreeDocument; import org.opensearch.index.compositeindex.datacube.startree.StarTreeFieldConfiguration; -import org.opensearch.index.compositeindex.datacube.startree.StarTreeIndexSettings; import org.opensearch.index.compositeindex.datacube.startree.StarTreeTestUtils; import org.opensearch.index.compositeindex.datacube.startree.index.StarTreeValues; -import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.NumberFieldMapper; -import org.opensearch.indices.IndicesModule; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.BeforeClass; import java.io.IOException; import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; -import static org.opensearch.common.util.FeatureFlags.STAR_TREE_INDEX; import static org.opensearch.index.compositeindex.CompositeIndexConstants.STAR_TREE_DOCS_COUNT; import static org.opensearch.index.compositeindex.datacube.startree.StarTreeTestUtils.assertStarTreeDocuments; @@ -73,48 +45,10 @@ * Star tree doc values Lucene tests */ @LuceneTestCase.SuppressSysoutChecks(bugUrl = "we log a lot on purpose") -public class StarTreeDocValuesFormatTests extends BaseDocValuesFormatTestCase { - MapperService mapperService = null; - StarTreeFieldConfiguration.StarTreeBuildMode buildMode; +public class StarTreeDocValuesFormatTests extends AbstractStarTreeDVFormatTests { public StarTreeDocValuesFormatTests(StarTreeFieldConfiguration.StarTreeBuildMode buildMode) { - this.buildMode = buildMode; - } - - @ParametersFactory - public static Collection parameters() { - List parameters = new ArrayList<>(); - parameters.add(new Object[] { StarTreeFieldConfiguration.StarTreeBuildMode.ON_HEAP }); - parameters.add(new Object[] { StarTreeFieldConfiguration.StarTreeBuildMode.OFF_HEAP }); - return parameters; - } - - @BeforeClass - public static void createMapper() throws Exception { - FeatureFlags.initializeFeatureFlags(Settings.builder().put(STAR_TREE_INDEX, "true").build()); - } - - @AfterClass - public static void clearMapper() { - 
FeatureFlags.initializeFeatureFlags(Settings.EMPTY); - } - - @After - public void teardown() throws IOException { - mapperService.close(); - } - - @Override - protected Codec getCodec() { - final Logger testLogger = LogManager.getLogger(StarTreeDocValuesFormatTests.class); - - try { - mapperService = createMapperService(getExpandedMapping()); - } catch (IOException e) { - throw new RuntimeException(e); - } - Codec codec = new Composite912Codec(Lucene912Codec.Mode.BEST_SPEED, mapperService, testLogger); - return codec; + super(buildMode); } public void testStarTreeDocValues() throws IOException { @@ -124,24 +58,24 @@ public void testStarTreeDocValues() throws IOException { RandomIndexWriter iw = new RandomIndexWriter(random(), directory, conf); Document doc = new Document(); doc.add(new SortedNumericDocValuesField("sndv", 1)); - doc.add(new SortedNumericDocValuesField("dv", 1)); - doc.add(new SortedNumericDocValuesField("field", -1)); + doc.add(new SortedNumericDocValuesField("dv1", 1)); + doc.add(new SortedNumericDocValuesField("field1", -1)); iw.addDocument(doc); doc = new Document(); doc.add(new SortedNumericDocValuesField("sndv", 1)); - doc.add(new SortedNumericDocValuesField("dv", 1)); - doc.add(new SortedNumericDocValuesField("field", -1)); + doc.add(new SortedNumericDocValuesField("dv1", 1)); + doc.add(new SortedNumericDocValuesField("field1", -1)); iw.addDocument(doc); doc = new Document(); iw.forceMerge(1); doc.add(new SortedNumericDocValuesField("sndv", 2)); - doc.add(new SortedNumericDocValuesField("dv", 2)); - doc.add(new SortedNumericDocValuesField("field", -2)); + doc.add(new SortedNumericDocValuesField("dv1", 2)); + doc.add(new SortedNumericDocValuesField("field1", -2)); iw.addDocument(doc); doc = new Document(); doc.add(new SortedNumericDocValuesField("sndv", 2)); - doc.add(new SortedNumericDocValuesField("dv", 2)); - doc.add(new SortedNumericDocValuesField("field", -2)); + doc.add(new SortedNumericDocValuesField("dv1", 2)); + doc.add(new SortedNumericDocValuesField("field1", -2)); iw.addDocument(doc); iw.forceMerge(1); iw.close(); @@ -217,8 +151,9 @@ public void testStarTreeDocValuesWithDeletions() throws IOException { Directory directory = newDirectory(); IndexWriterConfig conf = newIndexWriterConfig(null); conf.setMergePolicy(newLogMergePolicy()); + conf.setSoftDeletesField(Lucene.SOFT_DELETES_FIELD); + conf.setOpenMode(IndexWriterConfig.OpenMode.CREATE); RandomIndexWriter iw = new RandomIndexWriter(random(), directory, conf); - int iterations = 3; Map map = new HashMap<>(); List allIds = new ArrayList<>(); @@ -239,17 +174,25 @@ public void testStarTreeDocValuesWithDeletions() throws IOException { doc.add(new SortedNumericDocValuesField("dv", dvValue)); map.put(sndvValue + "-" + dvValue, fieldValue + map.getOrDefault(sndvValue + "-" + dvValue, 0)); + doc.add(new NumericDocValuesField("field-ndv", fieldValue)); + iw.addDocument(doc); } iw.flush(); } iw.commit(); - // Delete random number of documents + // Update random number of documents int docsToDelete = random().nextInt(9); // Delete up to 9 documents for (int i = 0; i < docsToDelete; i++) { if (!allIds.isEmpty()) { String idToDelete = allIds.remove(random().nextInt(allIds.size() - 1)); - iw.deleteDocuments(new Term("_id", idToDelete)); + Document doc = new Document(); + doc.add(new NumericDocValuesField("field-ndv", 1L)); + iw.w.softUpdateDocuments( + new Term("_id", idToDelete), + List.of(doc), + new NumericDocValuesField(Lucene.SOFT_DELETES_FIELD, 1) + ); allIds.remove(idToDelete); } } @@ -307,6 +250,11 @@ public 
void testStarTreeDocValuesWithDeletions() throws IOException { directory.close(); } + @Override + protected XContentBuilder getMapping() throws IOException { + return getExpandedMapping(); + } + public static XContentBuilder getExpandedMapping() throws IOException { return topMapping(b -> { b.startObject("composite"); @@ -319,12 +267,12 @@ public static XContentBuilder getExpandedMapping() throws IOException { b.field("name", "sndv"); b.endObject(); b.startObject(); - b.field("name", "dv"); + b.field("name", "dv1"); b.endObject(); b.endArray(); b.startArray("metrics"); b.startObject(); - b.field("name", "field"); + b.field("name", "field1"); b.startArray("stats"); b.value("sum"); b.value("value_count"); @@ -351,40 +299,13 @@ public static XContentBuilder getExpandedMapping() throws IOException { b.startObject("sndv"); b.field("type", "integer"); b.endObject(); - b.startObject("dv"); + b.startObject("dv1"); b.field("type", "integer"); b.endObject(); - b.startObject("field"); + b.startObject("field1"); b.field("type", "integer"); b.endObject(); b.endObject(); }); } - - public static XContentBuilder topMapping(CheckedConsumer buildFields) throws IOException { - XContentBuilder builder = XContentFactory.jsonBuilder().startObject().startObject("_doc"); - buildFields.accept(builder); - return builder.endObject().endObject(); - } - - public static MapperService createMapperService(XContentBuilder builder) throws IOException { - Settings settings = Settings.builder() - .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) - .put(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey(), true) - .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(512, ByteSizeUnit.MB)) - .build(); - IndexMetadata indexMetadata = IndexMetadata.builder("test").settings(settings).putMapping(builder.toString()).build(); - IndicesModule indicesModule = new IndicesModule(Collections.emptyList()); - MapperService mapperService = MapperTestUtils.newMapperServiceWithHelperAnalyzer( - new NamedXContentRegistry(ClusterModule.getNamedXWriteables()), - createTempDir(), - settings, - indicesModule, - "test" - ); - mapperService.merge(indexMetadata, MapperService.MergeReason.INDEX_TEMPLATE); - return mapperService; - } } diff --git a/server/src/test/java/org/opensearch/index/codec/composite912/datacube/startree/StarTreeKeywordDocValuesFormatTests.java b/server/src/test/java/org/opensearch/index/codec/composite912/datacube/startree/StarTreeKeywordDocValuesFormatTests.java new file mode 100644 index 0000000000000..402ed1dbee98a --- /dev/null +++ b/server/src/test/java/org/opensearch/index/codec/composite912/datacube/startree/StarTreeKeywordDocValuesFormatTests.java @@ -0,0 +1,572 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.codec.composite912.datacube.startree; + +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.NumericDocValuesField; +import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.document.SortedSetDocValuesField; +import org.apache.lucene.document.StringField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.SegmentReader; +import org.apache.lucene.index.Term; +import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; +import org.apache.lucene.tests.util.LuceneTestCase; +import org.apache.lucene.tests.util.TestUtil; +import org.apache.lucene.util.BytesRef; +import org.opensearch.common.lucene.Lucene; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; +import org.opensearch.index.codec.composite.CompositeIndexReader; +import org.opensearch.index.compositeindex.datacube.startree.StarTreeDocument; +import org.opensearch.index.compositeindex.datacube.startree.StarTreeFieldConfiguration; +import org.opensearch.index.compositeindex.datacube.startree.StarTreeTestUtils; +import org.opensearch.index.compositeindex.datacube.startree.index.StarTreeValues; +import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.SortedSetStarTreeValuesIterator; +import org.opensearch.index.mapper.NumberFieldMapper; + +import java.io.IOException; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.opensearch.index.compositeindex.CompositeIndexConstants.STAR_TREE_DOCS_COUNT; +import static org.opensearch.index.compositeindex.datacube.startree.StarTreeTestUtils.assertStarTreeDocuments; + +/** + * Star tree doc values Lucene tests + */ +@LuceneTestCase.SuppressSysoutChecks(bugUrl = "we log a lot on purpose") +public class StarTreeKeywordDocValuesFormatTests extends AbstractStarTreeDVFormatTests { + + public StarTreeKeywordDocValuesFormatTests(StarTreeFieldConfiguration.StarTreeBuildMode buildMode) { + super(buildMode); + } + + public void testStarTreeKeywordDocValues() throws IOException { + Directory directory = newDirectory(); + IndexWriterConfig conf = newIndexWriterConfig(null); + conf.setMergePolicy(newLogMergePolicy()); + RandomIndexWriter iw = new RandomIndexWriter(random(), directory, conf); + Document doc = new Document(); + doc.add(new StringField("_id", "1", Field.Store.NO)); + doc.add(new SortedNumericDocValuesField("sndv", 1)); + doc.add(new SortedSetDocValuesField("keyword1", new BytesRef("text1"))); + doc.add(new SortedSetDocValuesField("keyword2", new BytesRef("text2"))); + iw.addDocument(doc); + doc = new Document(); + doc.add(new StringField("_id", "2", Field.Store.NO)); + doc.add(new SortedNumericDocValuesField("sndv", 1)); + doc.add(new SortedSetDocValuesField("keyword1", new BytesRef("text11"))); + doc.add(new SortedSetDocValuesField("keyword2", new BytesRef("text22"))); + iw.addDocument(doc); + iw.flush(); + iw.deleteDocuments(new Term("_id", "2")); + iw.flush(); + doc = new Document(); + doc.add(new StringField("_id", "3", Field.Store.NO)); + doc.add(new SortedNumericDocValuesField("sndv", 2)); + doc.add(new SortedSetDocValuesField("keyword1", new BytesRef("text1"))); + doc.add(new 
SortedSetDocValuesField("keyword2", new BytesRef("text2"))); + iw.addDocument(doc); + doc = new Document(); + doc.add(new StringField("_id", "4", Field.Store.NO)); + doc.add(new SortedNumericDocValuesField("sndv", 2)); + doc.add(new SortedSetDocValuesField("keyword1", new BytesRef("text11"))); + doc.add(new SortedSetDocValuesField("keyword2", new BytesRef("text22"))); + iw.addDocument(doc); + iw.flush(); + iw.deleteDocuments(new Term("_id", "4")); + iw.flush(); + iw.forceMerge(1); + + iw.close(); + + DirectoryReader ir = maybeWrapWithMergingReader(DirectoryReader.open(directory)); + TestUtil.checkReader(ir); + assertEquals(1, ir.leaves().size()); + + // Star tree documents + /** + keyword1 keyword2 | [ sum, value_count, min, max[sndv]] , doc_count + [0, 0] | [3.0, 2.0, 1.0, 2.0, 2.0] + [1, 1] | [3.0, 2.0, 1.0, 2.0, 2.0] + [null, 0] | [3.0, 2.0, 1.0, 2.0, 2.0] + [null, 1] | [3.0, 2.0, 1.0, 2.0, 2.0] + [null, null] | [6.0, 4.0, 1.0, 2.0, 4.0] + */ + StarTreeDocument[] expectedStarTreeDocuments = new StarTreeDocument[5]; + expectedStarTreeDocuments[0] = new StarTreeDocument(new Long[] { 0L, 0L }, new Double[] { 3.0, 2.0, 1.0, 2.0, 2.0 }); + expectedStarTreeDocuments[1] = new StarTreeDocument(new Long[] { 1L, 1L }, new Double[] { 3.0, 2.0, 1.0, 2.0, 2.0 }); + expectedStarTreeDocuments[2] = new StarTreeDocument(new Long[] { null, 0L }, new Double[] { 3.0, 2.0, 1.0, 2.0, 2.0 }); + expectedStarTreeDocuments[3] = new StarTreeDocument(new Long[] { null, 1L }, new Double[] { 3.0, 2.0, 1.0, 2.0, 2.0 }); + expectedStarTreeDocuments[4] = new StarTreeDocument(new Long[] { null, null }, new Double[] { 6.0, 4.0, 1.0, 2.0, 4.0 }); + + for (LeafReaderContext context : ir.leaves()) { + SegmentReader reader = Lucene.segmentReader(context.reader()); + CompositeIndexReader starTreeDocValuesReader = (CompositeIndexReader) reader.getDocValuesReader(); + List compositeIndexFields = starTreeDocValuesReader.getCompositeIndexFields(); + + for (CompositeIndexFieldInfo compositeIndexFieldInfo : compositeIndexFields) { + StarTreeValues starTreeValues = (StarTreeValues) starTreeDocValuesReader.getCompositeIndexValues(compositeIndexFieldInfo); + StarTreeDocument[] starTreeDocuments = StarTreeTestUtils.getSegmentsStarTreeDocuments( + List.of(starTreeValues), + List.of( + NumberFieldMapper.NumberType.DOUBLE, + NumberFieldMapper.NumberType.LONG, + NumberFieldMapper.NumberType.DOUBLE, + NumberFieldMapper.NumberType.DOUBLE, + NumberFieldMapper.NumberType.LONG + ), + Integer.parseInt(starTreeValues.getAttributes().get(STAR_TREE_DOCS_COUNT)) + ); + assertStarTreeDocuments(starTreeDocuments, expectedStarTreeDocuments); + } + } + ir.close(); + directory.close(); + } + + public void testStarTreeKeywordDocValuesWithDeletions() throws IOException { + Directory directory = newDirectory(); + IndexWriterConfig conf = newIndexWriterConfig(null); + conf.setMergePolicy(newLogMergePolicy()); + RandomIndexWriter iw = new RandomIndexWriter(random(), directory, conf); + + int iterations = 3; + Set allIds = new HashSet<>(); + Map documents = new HashMap<>(); + Map map = new HashMap<>(); + for (int iter = 0; iter < iterations; iter++) { + // Add 10 documents + for (int i = 0; i < 10; i++) { + String id = String.valueOf(random().nextInt() + 1); + allIds.add(id); + Document doc = new Document(); + doc.add(new StringField("_id", id, Field.Store.YES)); + int sndvValue = random().nextInt(5) + 1; + doc.add(new SortedNumericDocValuesField("sndv", sndvValue)); + + String keyword1Value = "text" + random().nextInt(3); + + doc.add(new 
SortedSetDocValuesField("keyword1", new BytesRef(keyword1Value))); + String keyword2Value = "text" + random().nextInt(3); + + doc.add(new SortedSetDocValuesField("keyword2", new BytesRef(keyword2Value))); + map.put(keyword1Value + "-" + keyword2Value, sndvValue + map.getOrDefault(keyword1Value + "-" + keyword2Value, 0)); + iw.addDocument(doc); + documents.put(id, doc); + } + + iw.flush(); + + // Update random number of documents + int docsToDelete = random().nextInt(5); // Delete up to 5 documents + for (int i = 0; i < docsToDelete; i++) { + if (!allIds.isEmpty()) { + String idToDelete = allIds.iterator().next(); + Document doc = new Document(); + doc.add(new NumericDocValuesField("field-ndv", 1L)); + iw.w.softUpdateDocuments( + new Term("_id", idToDelete), + List.of(doc), + new NumericDocValuesField(Lucene.SOFT_DELETES_FIELD, 1) + ); + allIds.remove(idToDelete); + documents.remove(idToDelete); + } + } + + iw.flush(); + } + + iw.forceMerge(1); + iw.close(); + + DirectoryReader ir = maybeWrapWithMergingReader(DirectoryReader.open(directory)); + TestUtil.checkReader(ir); + assertEquals(1, ir.leaves().size()); + + // Assert star tree documents + for (LeafReaderContext context : ir.leaves()) { + SegmentReader reader = Lucene.segmentReader(context.reader()); + CompositeIndexReader starTreeDocValuesReader = (CompositeIndexReader) reader.getDocValuesReader(); + List compositeIndexFields = starTreeDocValuesReader.getCompositeIndexFields(); + + for (CompositeIndexFieldInfo compositeIndexFieldInfo : compositeIndexFields) { + StarTreeValues starTreeValues = (StarTreeValues) starTreeDocValuesReader.getCompositeIndexValues(compositeIndexFieldInfo); + StarTreeDocument[] actualStarTreeDocuments = StarTreeTestUtils.getSegmentsStarTreeDocuments( + List.of(starTreeValues), + List.of( + NumberFieldMapper.NumberType.DOUBLE, + NumberFieldMapper.NumberType.LONG, + NumberFieldMapper.NumberType.DOUBLE, + NumberFieldMapper.NumberType.DOUBLE, + NumberFieldMapper.NumberType.LONG + ), + Integer.parseInt(starTreeValues.getAttributes().get(STAR_TREE_DOCS_COUNT)) + ); + SortedSetStarTreeValuesIterator k1 = (SortedSetStarTreeValuesIterator) starTreeValues.getDimensionValuesIterator( + "keyword1" + ); + SortedSetStarTreeValuesIterator k2 = (SortedSetStarTreeValuesIterator) starTreeValues.getDimensionValuesIterator( + "keyword2" + ); + for (StarTreeDocument starDoc : actualStarTreeDocuments) { + String keyword1 = null; + if (starDoc.dimensions[0] != null) { + keyword1 = k1.lookupOrd(starDoc.dimensions[0]).utf8ToString(); + } + + String keyword2 = null; + if (starDoc.dimensions[1] != null) { + keyword2 = k2.lookupOrd(starDoc.dimensions[1]).utf8ToString(); + } + double metric = (double) starDoc.metrics[0]; + if (map.containsKey(keyword1 + "-" + keyword2)) { + assertEquals((int) map.get(keyword1 + "-" + keyword2), (int) metric); + } + } + } + } + + ir.close(); + directory.close(); + } + + public void testStarKeywordDocValuesWithMissingDocs() throws IOException { + Directory directory = newDirectory(); + IndexWriterConfig conf = newIndexWriterConfig(null); + conf.setMergePolicy(newLogMergePolicy()); + RandomIndexWriter iw = new RandomIndexWriter(random(), directory, conf); + Document doc = new Document(); + doc.add(new SortedNumericDocValuesField("sndv", 1)); + doc.add(new SortedSetDocValuesField("keyword2", new BytesRef("text2"))); + iw.addDocument(doc); + doc = new Document(); + doc.add(new SortedNumericDocValuesField("sndv", 1)); + doc.add(new SortedSetDocValuesField("keyword2", new BytesRef("text22"))); + 
iw.addDocument(doc); + iw.forceMerge(1); + doc = new Document(); + doc.add(new SortedNumericDocValuesField("sndv", 2)); + doc.add(new SortedSetDocValuesField("keyword1", new BytesRef("text1"))); + doc.add(new SortedSetDocValuesField("keyword2", new BytesRef("text2"))); + iw.addDocument(doc); + doc = new Document(); + doc.add(new SortedNumericDocValuesField("sndv", 2)); + doc.add(new SortedSetDocValuesField("keyword1", new BytesRef("text11"))); + doc.add(new SortedSetDocValuesField("keyword2", new BytesRef("text22"))); + iw.addDocument(doc); + iw.forceMerge(1); + iw.close(); + + DirectoryReader ir = maybeWrapWithMergingReader(DirectoryReader.open(directory)); + TestUtil.checkReader(ir); + assertEquals(1, ir.leaves().size()); + + // Star tree documents + /** + * keyword1 keyword2 | [ sum, value_count, min, max[sndv]] , doc_count + [0, 0] | [2.0, 1.0, 2.0, 2.0, 1.0] + [1, 1] | [2.0, 1.0, 2.0, 2.0, 1.0] + [null, 0] | [1.0, 1.0, 1.0, 1.0, 1.0] + [null, 1] | [1.0, 1.0, 1.0, 1.0, 1.0] + [null, 0] | [3.0, 2.0, 1.0, 2.0, 2.0] + [null, 1] | [3.0, 2.0, 1.0, 2.0, 2.0] + [null, null] | [6.0, 4.0, 1.0, 2.0, 4.0] + [null, null] | [2.0, 2.0, 1.0, 1.0, 2.0] + */ + StarTreeDocument[] expectedStarTreeDocuments = new StarTreeDocument[8]; + expectedStarTreeDocuments[0] = new StarTreeDocument(new Long[] { 0L, 0L }, new Double[] { 2.0, 1.0, 2.0, 2.0, 1.0 }); + expectedStarTreeDocuments[1] = new StarTreeDocument(new Long[] { 1L, 1L }, new Double[] { 2.0, 1.0, 2.0, 2.0, 1.0 }); + expectedStarTreeDocuments[2] = new StarTreeDocument(new Long[] { null, 0L }, new Double[] { 1.0, 1.0, 1.0, 1.0, 1.0 }); + expectedStarTreeDocuments[3] = new StarTreeDocument(new Long[] { null, 1L }, new Double[] { 1.0, 1.0, 1.0, 1.0, 1.0 }); + expectedStarTreeDocuments[4] = new StarTreeDocument(new Long[] { null, 0L }, new Double[] { 3.0, 2.0, 1.0, 2.0, 2.0 }); + expectedStarTreeDocuments[5] = new StarTreeDocument(new Long[] { null, 1L }, new Double[] { 3.0, 2.0, 1.0, 2.0, 2.0 }); + expectedStarTreeDocuments[6] = new StarTreeDocument(new Long[] { null, null }, new Double[] { 6.0, 4.0, 1.0, 2.0, 4.0 }); + expectedStarTreeDocuments[7] = new StarTreeDocument(new Long[] { null, null }, new Double[] { 2.0, 2.0, 1.0, 1.0, 2.0 }); + + for (LeafReaderContext context : ir.leaves()) { + SegmentReader reader = Lucene.segmentReader(context.reader()); + CompositeIndexReader starTreeDocValuesReader = (CompositeIndexReader) reader.getDocValuesReader(); + List compositeIndexFields = starTreeDocValuesReader.getCompositeIndexFields(); + + for (CompositeIndexFieldInfo compositeIndexFieldInfo : compositeIndexFields) { + StarTreeValues starTreeValues = (StarTreeValues) starTreeDocValuesReader.getCompositeIndexValues(compositeIndexFieldInfo); + StarTreeDocument[] starTreeDocuments = StarTreeTestUtils.getSegmentsStarTreeDocuments( + List.of(starTreeValues), + List.of( + NumberFieldMapper.NumberType.DOUBLE, + NumberFieldMapper.NumberType.LONG, + NumberFieldMapper.NumberType.DOUBLE, + NumberFieldMapper.NumberType.DOUBLE, + NumberFieldMapper.NumberType.LONG + ), + Integer.parseInt(starTreeValues.getAttributes().get(STAR_TREE_DOCS_COUNT)) + ); + assertStarTreeDocuments(starTreeDocuments, expectedStarTreeDocuments); + } + } + ir.close(); + directory.close(); + } + + public void testStarKeywordDocValuesWithMissingDocsInSegment() throws IOException { + Directory directory = newDirectory(); + IndexWriterConfig conf = newIndexWriterConfig(null); + conf.setMergePolicy(newLogMergePolicy()); + RandomIndexWriter iw = new RandomIndexWriter(random(), directory, conf); + 
Document doc = new Document(); + doc.add(new SortedNumericDocValuesField("sndv", 1)); + iw.addDocument(doc); + doc = new Document(); + doc.add(new SortedNumericDocValuesField("sndv", 1)); + iw.addDocument(doc); + iw.forceMerge(1); + doc = new Document(); + doc.add(new SortedNumericDocValuesField("sndv", 2)); + doc.add(new SortedSetDocValuesField("keyword1", new BytesRef("text1"))); + doc.add(new SortedSetDocValuesField("keyword2", new BytesRef("text2"))); + iw.addDocument(doc); + doc = new Document(); + doc.add(new SortedNumericDocValuesField("sndv", 2)); + doc.add(new SortedSetDocValuesField("keyword1", new BytesRef("text11"))); + doc.add(new SortedSetDocValuesField("keyword2", new BytesRef("text22"))); + iw.addDocument(doc); + iw.forceMerge(1); + iw.close(); + + DirectoryReader ir = maybeWrapWithMergingReader(DirectoryReader.open(directory)); + TestUtil.checkReader(ir); + assertEquals(1, ir.leaves().size()); + + // Star tree documents + /** + * keyword1 keyword2 | [ sum, value_count, min, max[sndv]] , doc_count + [0, 0] | [2.0, 1.0, 2.0, 2.0, 1.0] + [1, 1] | [2.0, 1.0, 2.0, 2.0, 1.0] + [null, null] | [2.0, 2.0, 1.0, 1.0, 2.0] // This is for missing doc + [null, 0] | [2.0, 1.0, 2.0, 2.0, 1.0] + [null, 1] | [2.0, 1.0, 2.0, 2.0, 1.0] + [null, null] | [2.0, 2.0, 1.0, 1.0, 2.0] + [null, null] | [6.0, 4.0, 1.0, 2.0, 4.0] // This is star document + */ + StarTreeDocument[] expectedStarTreeDocuments = new StarTreeDocument[7]; + expectedStarTreeDocuments[0] = new StarTreeDocument(new Long[] { 0L, 0L }, new Double[] { 2.0, 1.0, 2.0, 2.0, 1.0 }); + expectedStarTreeDocuments[1] = new StarTreeDocument(new Long[] { 1L, 1L }, new Double[] { 2.0, 1.0, 2.0, 2.0, 1.0 }); + expectedStarTreeDocuments[2] = new StarTreeDocument(new Long[] { null, null }, new Double[] { 2.0, 2.0, 1.0, 1.0, 2.0 }); + expectedStarTreeDocuments[3] = new StarTreeDocument(new Long[] { null, 0L }, new Double[] { 2.0, 1.0, 2.0, 2.0, 1.0 }); + expectedStarTreeDocuments[4] = new StarTreeDocument(new Long[] { null, 1L }, new Double[] { 2.0, 1.0, 2.0, 2.0, 1.0 }); + expectedStarTreeDocuments[5] = new StarTreeDocument(new Long[] { null, null }, new Double[] { 2.0, 2.0, 1.0, 1.0, 2.0 }); + expectedStarTreeDocuments[6] = new StarTreeDocument(new Long[] { null, null }, new Double[] { 6.0, 4.0, 1.0, 2.0, 4.0 }); + + for (LeafReaderContext context : ir.leaves()) { + SegmentReader reader = Lucene.segmentReader(context.reader()); + CompositeIndexReader starTreeDocValuesReader = (CompositeIndexReader) reader.getDocValuesReader(); + List compositeIndexFields = starTreeDocValuesReader.getCompositeIndexFields(); + + for (CompositeIndexFieldInfo compositeIndexFieldInfo : compositeIndexFields) { + StarTreeValues starTreeValues = (StarTreeValues) starTreeDocValuesReader.getCompositeIndexValues(compositeIndexFieldInfo); + StarTreeDocument[] starTreeDocuments = StarTreeTestUtils.getSegmentsStarTreeDocuments( + List.of(starTreeValues), + List.of( + NumberFieldMapper.NumberType.DOUBLE, + NumberFieldMapper.NumberType.LONG, + NumberFieldMapper.NumberType.DOUBLE, + NumberFieldMapper.NumberType.DOUBLE, + NumberFieldMapper.NumberType.LONG + ), + Integer.parseInt(starTreeValues.getAttributes().get(STAR_TREE_DOCS_COUNT)) + ); + assertStarTreeDocuments(starTreeDocuments, expectedStarTreeDocuments); + } + } + ir.close(); + directory.close(); + } + + public void testStarKeywordDocValuesWithMissingDocsInAllSegments() throws IOException { + Directory directory = newDirectory(); + IndexWriterConfig conf = newIndexWriterConfig(null); + 
conf.setMergePolicy(newLogMergePolicy()); + RandomIndexWriter iw = new RandomIndexWriter(random(), directory, conf); + Document doc = new Document(); + doc.add(new SortedNumericDocValuesField("sndv", 1)); + iw.addDocument(doc); + doc = new Document(); + doc.add(new SortedNumericDocValuesField("sndv", 1)); + iw.addDocument(doc); + iw.forceMerge(1); + doc = new Document(); + doc.add(new SortedNumericDocValuesField("sndv", 2)); + iw.addDocument(doc); + doc = new Document(); + doc.add(new SortedNumericDocValuesField("sndv", 2)); + iw.addDocument(doc); + iw.forceMerge(1); + iw.close(); + + DirectoryReader ir = maybeWrapWithMergingReader(DirectoryReader.open(directory)); + TestUtil.checkReader(ir); + assertEquals(1, ir.leaves().size()); + + // Star tree documents + /** + * keyword1 keyword2 | [ sum, value_count, min, max[sndv]] , doc_count + [null, null] | [6.0, 4.0, 1.0, 2.0, 4.0] + + */ + StarTreeDocument[] expectedStarTreeDocuments = new StarTreeDocument[1]; + expectedStarTreeDocuments[0] = new StarTreeDocument(new Long[] { null, null }, new Double[] { 6.0, 4.0, 1.0, 2.0, 4.0 }); + + for (LeafReaderContext context : ir.leaves()) { + SegmentReader reader = Lucene.segmentReader(context.reader()); + CompositeIndexReader starTreeDocValuesReader = (CompositeIndexReader) reader.getDocValuesReader(); + List compositeIndexFields = starTreeDocValuesReader.getCompositeIndexFields(); + + for (CompositeIndexFieldInfo compositeIndexFieldInfo : compositeIndexFields) { + StarTreeValues starTreeValues = (StarTreeValues) starTreeDocValuesReader.getCompositeIndexValues(compositeIndexFieldInfo); + StarTreeDocument[] starTreeDocuments = StarTreeTestUtils.getSegmentsStarTreeDocuments( + List.of(starTreeValues), + List.of( + NumberFieldMapper.NumberType.DOUBLE, + NumberFieldMapper.NumberType.LONG, + NumberFieldMapper.NumberType.DOUBLE, + NumberFieldMapper.NumberType.DOUBLE, + NumberFieldMapper.NumberType.LONG + ), + Integer.parseInt(starTreeValues.getAttributes().get(STAR_TREE_DOCS_COUNT)) + ); + assertStarTreeDocuments(starTreeDocuments, expectedStarTreeDocuments); + } + } + ir.close(); + directory.close(); + } + + public void testStarKeywordDocValuesWithMissingDocsInMixedSegments() throws IOException { + Directory directory = newDirectory(); + IndexWriterConfig conf = newIndexWriterConfig(null); + conf.setMergePolicy(newLogMergePolicy()); + RandomIndexWriter iw = new RandomIndexWriter(random(), directory, conf); + Document doc = new Document(); + doc.add(new SortedNumericDocValuesField("sndv", 1)); + iw.addDocument(doc); + doc = new Document(); + doc.add(new SortedNumericDocValuesField("sndv", 1)); + iw.addDocument(doc); + iw.forceMerge(1); + doc = new Document(); + doc.add(new SortedNumericDocValuesField("sndv", 2)); + doc.add(new SortedSetDocValuesField("keyword1", new BytesRef("text1"))); + iw.addDocument(doc); + doc = new Document(); + doc.add(new SortedNumericDocValuesField("sndv", 2)); + iw.addDocument(doc); + iw.forceMerge(1); + iw.close(); + + DirectoryReader ir = maybeWrapWithMergingReader(DirectoryReader.open(directory)); + TestUtil.checkReader(ir); + assertEquals(1, ir.leaves().size()); + + // Star tree documents + /** + * keyword1 keyword2 | [ sum, value_count, min, max[sndv]] , doc_count + [0, 0] | [2.0, 1.0, 2.0, 2.0, 1.0] + [1, 1] | [2.0, 1.0, 2.0, 2.0, 1.0] + [null, 0] | [1.0, 1.0, 1.0, 1.0, 1.0] + [null, 1] | [1.0, 1.0, 1.0, 1.0, 1.0] + [null, 0] | [3.0, 2.0, 1.0, 2.0, 2.0] + [null, 1] | [3.0, 2.0, 1.0, 2.0, 2.0] + [null, null] | [6.0, 4.0, 1.0, 2.0, 4.0] + [null, null] | [2.0, 2.0, 1.0, 1.0, 
2.0] + */ + StarTreeDocument[] expectedStarTreeDocuments = new StarTreeDocument[3]; + expectedStarTreeDocuments[0] = new StarTreeDocument(new Long[] { 0L, null }, new Double[] { 2.0, 1.0, 2.0, 2.0, 1.0 }); + expectedStarTreeDocuments[1] = new StarTreeDocument(new Long[] { null, null }, new Double[] { 4.0, 3.0, 1.0, 2.0, 3.0 }); + expectedStarTreeDocuments[2] = new StarTreeDocument(new Long[] { null, null }, new Double[] { 6.0, 4.0, 1.0, 2.0, 4.0 }); + + for (LeafReaderContext context : ir.leaves()) { + SegmentReader reader = Lucene.segmentReader(context.reader()); + CompositeIndexReader starTreeDocValuesReader = (CompositeIndexReader) reader.getDocValuesReader(); + List compositeIndexFields = starTreeDocValuesReader.getCompositeIndexFields(); + + for (CompositeIndexFieldInfo compositeIndexFieldInfo : compositeIndexFields) { + StarTreeValues starTreeValues = (StarTreeValues) starTreeDocValuesReader.getCompositeIndexValues(compositeIndexFieldInfo); + StarTreeDocument[] starTreeDocuments = StarTreeTestUtils.getSegmentsStarTreeDocuments( + List.of(starTreeValues), + List.of( + NumberFieldMapper.NumberType.DOUBLE, + NumberFieldMapper.NumberType.LONG, + NumberFieldMapper.NumberType.DOUBLE, + NumberFieldMapper.NumberType.DOUBLE, + NumberFieldMapper.NumberType.LONG + ), + Integer.parseInt(starTreeValues.getAttributes().get(STAR_TREE_DOCS_COUNT)) + ); + assertStarTreeDocuments(starTreeDocuments, expectedStarTreeDocuments); + } + } + ir.close(); + directory.close(); + } + + @Override + protected XContentBuilder getMapping() throws IOException { + return topMapping(b -> { + b.startObject("composite"); + b.startObject("startree"); + b.field("type", "star_tree"); + b.startObject("config"); + b.field("max_leaf_docs", 1); + b.startArray("ordered_dimensions"); + b.startObject(); + b.field("name", "keyword1"); + b.endObject(); + b.startObject(); + b.field("name", "keyword2"); + b.endObject(); + b.endArray(); + b.startArray("metrics"); + b.startObject(); + b.field("name", "sndv"); + b.startArray("stats"); + b.value("sum"); + b.value("value_count"); + b.value("avg"); + b.value("min"); + b.value("max"); + b.endArray(); + b.endObject(); + b.endArray(); + b.endObject(); + b.endObject(); + b.endObject(); + b.startObject("properties"); + b.startObject("sndv"); + b.field("type", "integer"); + b.endObject(); + b.startObject("keyword1"); + b.field("type", "keyword"); + b.endObject(); + b.startObject("keyword2"); + b.field("type", "keyword"); + b.endObject(); + b.endObject(); + }); + } +} diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeTestUtils.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeTestUtils.java index dc8b3320f3de2..44e40f1db4cc8 100644 --- a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeTestUtils.java +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeTestUtils.java @@ -99,6 +99,7 @@ public static StarTreeDocument getStarTreeDocument( ) throws IOException { Long[] dims = new Long[dimensionReaders.length]; int i = 0; + for (SequentialDocValuesIterator dimensionDocValueIterator : dimensionReaders) { dimensionDocValueIterator.nextEntry(currentDocId); Long val = dimensionDocValueIterator.value(currentDocId); @@ -117,6 +118,9 @@ public static StarTreeDocument getStarTreeDocument( public static Double toAggregatorValueType(Long value, FieldValueConverter fieldValueConverter) { try { + if (value == null) { + return 0.0; + } return 
fieldValueConverter.toDoubleValue(value); } catch (Exception e) { throw new IllegalStateException("Cannot convert " + value + " to sortable aggregation type", e); diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilderTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilderTests.java index 012d04c575f55..ac729f6392f63 100644 --- a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilderTests.java @@ -201,7 +201,7 @@ public List getStarTreeDocuments() { @Override public Long getDimensionValue(int docId, int dimensionId) throws IOException { - return 0l; + return 0L; } @Override diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/BuilderTestsUtils.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/BuilderTestsUtils.java index cc6c1758697dd..077bf0422ab50 100644 --- a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/BuilderTestsUtils.java +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/BuilderTestsUtils.java @@ -18,11 +18,13 @@ import org.apache.lucene.index.SegmentReadState; import org.apache.lucene.index.SegmentWriteState; import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.index.VectorEncoding; import org.apache.lucene.index.VectorSimilarityFunction; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; +import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.InfoStream; import org.apache.lucene.util.Version; import org.opensearch.index.codec.composite.LuceneDocValuesProducerFactory; @@ -150,6 +152,65 @@ public long cost() { }; } + public static SortedSetDocValues getSortedSetMock(List dimList, List docsWithField) { + return getSortedSetMock(dimList, docsWithField, 1); + } + + public static SortedSetDocValues getSortedSetMock(List dimList, List docsWithField, int valueCount) { + return new SortedSetDocValues() { + int index = -1; + + @Override + public long nextOrd() throws IOException { + return dimList.get(index); + } + + @Override + public int docValueCount() { + return 1; + } + + @Override + public BytesRef lookupOrd(long l) throws IOException { + return new BytesRef("dummy" + l); + } + + @Override + public long getValueCount() { + return valueCount; + } + + @Override + public boolean advanceExact(int target) { + return false; + } + + @Override + public int docID() { + return index; + } + + @Override + public int nextDoc() { + if (index == docsWithField.size() - 1) { + return NO_MORE_DOCS; + } + index++; + return docsWithField.get(index); + } + + @Override + public int advance(int target) { + return 0; + } + + @Override + public long cost() { + return 0; + } + }; + } + public static void validateStarTree( InMemoryTreeNode root, int totalDimensions, @@ -386,7 +447,7 @@ public static void validateStarTreeFileFormats( public static SegmentReadState getReadState( int numDocs, - List dimensionFields, + Map dimensionFields, List metrics, StarTreeField compositeField, SegmentWriteState writeState, @@ -401,7 +462,7 @@ public static SegmentReadState getReadState( FieldInfo[] fields = new 
FieldInfo[dimensionFields.size() + numMetrics]; int i = 0; - for (String dimension : dimensionFields) { + for (String dimension : dimensionFields.keySet()) { fields[i] = new FieldInfo( fullyQualifiedFieldNameForStarTreeDimensionsDocValues(compositeField.getName(), dimension), i, @@ -409,7 +470,7 @@ public static SegmentReadState getReadState( false, true, IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS, - DocValuesType.SORTED_NUMERIC, + dimensionFields.get(dimension), -1, Collections.emptyMap(), 0, diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuildMetricTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuildMetricTests.java index c4d6fe6f19812..95adae9335740 100644 --- a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuildMetricTests.java +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuildMetricTests.java @@ -52,6 +52,7 @@ import java.util.Collections; import java.util.HashMap; import java.util.Iterator; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Set; @@ -591,13 +592,19 @@ public void test_build_multipleStarTrees() throws IOException { metaOut.close(); dataOut.close(); + LinkedHashMap fieldsMap = new LinkedHashMap<>(); + fieldsMap.put("field1", DocValuesType.SORTED_NUMERIC); + fieldsMap.put("field3", DocValuesType.SORTED_NUMERIC); + fieldsMap.put("field5", DocValuesType.SORTED_NUMERIC); + fieldsMap.put("field8", DocValuesType.SORTED_NUMERIC); + StarTreeMetadata starTreeMetadata = new StarTreeMetadata( "test", STAR_TREE, mock(IndexInput.class), VERSION_CURRENT, builder.numStarTreeNodes, - List.of("field1", "field3", "field5", "field8"), + fieldsMap, List.of( new Metric("field2", List.of(MetricStat.SUM)), new Metric("field4", List.of(MetricStat.SUM)), @@ -614,13 +621,18 @@ public void test_build_multipleStarTrees() throws IOException { 330 ); + LinkedHashMap fieldsMap1 = new LinkedHashMap<>(); + fieldsMap1.put("fieldC", DocValuesType.SORTED_NUMERIC); + fieldsMap1.put("fieldB", DocValuesType.SORTED_NUMERIC); + fieldsMap1.put("fieldL", DocValuesType.SORTED_NUMERIC); + StarTreeMetadata starTreeMetadata2 = new StarTreeMetadata( "test", STAR_TREE, mock(IndexInput.class), VERSION_CURRENT, builder.numStarTreeNodes, - List.of("fieldC", "fieldB", "fieldL"), + fieldsMap1, List.of(new Metric("fieldI", List.of(MetricStat.SUM))), 7, 27, @@ -631,9 +643,8 @@ public void test_build_multipleStarTrees() throws IOException { 1287 ); - List totalDimensionFields = new ArrayList<>(); - totalDimensionFields.addAll(starTreeMetadata.getDimensionFields()); - totalDimensionFields.addAll(starTreeMetadata2.getDimensionFields()); + LinkedHashMap totalDimensionFields = new LinkedHashMap<>(starTreeMetadata.getDimensionFields()); + totalDimensionFields.putAll(starTreeMetadata2.getDimensionFields()); List metrics = new ArrayList<>(); metrics.addAll(starTreeMetadata.getMetrics()); diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderFlushFlowTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderFlushFlowTests.java index a92ac39cb7020..440268f1f803c 100644 --- a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderFlushFlowTests.java +++ 
b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderFlushFlowTests.java @@ -9,15 +9,18 @@ package org.opensearch.index.compositeindex.datacube.startree.builder; import org.apache.lucene.codecs.DocValuesProducer; +import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.EmptyDocValuesProducer; import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.SegmentWriteState; import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.store.IndexInput; import org.opensearch.index.codec.composite.LuceneDocValuesConsumerFactory; import org.opensearch.index.codec.composite.composite912.Composite912DocValuesFormat; import org.opensearch.index.compositeindex.datacube.Dimension; +import org.opensearch.index.compositeindex.datacube.KeywordDimension; import org.opensearch.index.compositeindex.datacube.Metric; import org.opensearch.index.compositeindex.datacube.MetricStat; import org.opensearch.index.compositeindex.datacube.NumericDimension; @@ -27,17 +30,20 @@ import org.opensearch.index.compositeindex.datacube.startree.fileformats.meta.StarTreeMetadata; import org.opensearch.index.compositeindex.datacube.startree.utils.SequentialDocValuesIterator; import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.SortedNumericStarTreeValuesIterator; +import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.SortedSetStarTreeValuesIterator; import java.io.IOException; import java.util.ArrayList; import java.util.HashSet; import java.util.Iterator; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; import static org.opensearch.index.compositeindex.datacube.startree.builder.BuilderTestsUtils.getSortedNumericMock; +import static org.opensearch.index.compositeindex.datacube.startree.builder.BuilderTestsUtils.getSortedSetMock; import static org.opensearch.index.compositeindex.datacube.startree.builder.BuilderTestsUtils.validateStarTree; import static org.opensearch.index.compositeindex.datacube.startree.fileformats.StarTreeWriter.VERSION_CURRENT; import static org.opensearch.index.mapper.CompositeMappedFieldType.CompositeFieldType.STAR_TREE; @@ -124,14 +130,16 @@ public void testFlushFlow() throws IOException { metaOut.close(); dataOut.close(); docValuesConsumer.close(); - + LinkedHashMap docValues = new LinkedHashMap<>(); + docValues.put("field1", DocValuesType.SORTED_NUMERIC); + docValues.put("field3", DocValuesType.SORTED_NUMERIC); StarTreeMetadata starTreeMetadata = new StarTreeMetadata( "sf", STAR_TREE, mock(IndexInput.class), VERSION_CURRENT, builder.numStarTreeNodes, - List.of("field1", "field3"), + docValues, List.of(new Metric("field2", List.of(MetricStat.SUM, MetricStat.VALUE_COUNT, MetricStat.AVG))), 6, builder.numStarTreeDocs, @@ -222,13 +230,16 @@ public void testFlushFlowDimsReverse() throws IOException { dataOut.close(); docValuesConsumer.close(); + LinkedHashMap docValues = new LinkedHashMap<>(); + docValues.put("field1", DocValuesType.SORTED_NUMERIC); + docValues.put("field3", DocValuesType.SORTED_NUMERIC); StarTreeMetadata starTreeMetadata = new StarTreeMetadata( "sf", STAR_TREE, mock(IndexInput.class), VERSION_CURRENT, builder.numStarTreeNodes, - List.of("field1", "field3"), + docValues, List.of(new Metric("field2", List.of(MetricStat.SUM, MetricStat.VALUE_COUNT, 
MetricStat.AVG))), 6, builder.numStarTreeDocs, @@ -322,7 +333,10 @@ public void testFlushFlowBuild() throws IOException { dataOut.close(); docValuesConsumer.close(); - StarTreeMetadata starTreeMetadata = getStarTreeMetadata(List.of("field1", "field3"), 100, 1, 6699); + LinkedHashMap map = new LinkedHashMap<>(); + map.put("field1", DocValuesType.SORTED_NUMERIC); + map.put("field3", DocValuesType.SORTED_NUMERIC); + StarTreeMetadata starTreeMetadata = getStarTreeMetadata(map, 100, 1, 6699); validateStarTreeFileFormats( builder.getRootNode(), @@ -396,6 +410,115 @@ public void testFlushFlowWithTimestamps() throws IOException { validateStarTree(builder.getRootNode(), 3, 10, builder.getStarTreeDocuments()); } + public void testFlushFlowForKeywords() throws IOException { + List dimList = List.of(0L, 1L, 2L, 3L, 4L, 5L); + List docsWithField = List.of(0, 1, 2, 3, 4, 5); + List dimList2 = List.of(0L, 1L, 2L, 3L, 4L, 5L); + List docsWithField2 = List.of(0, 1, 2, 3, 4, 5); + + List metricsList = List.of( + getLongFromDouble(0.0), + getLongFromDouble(10.0), + getLongFromDouble(20.0), + getLongFromDouble(30.0), + getLongFromDouble(40.0), + getLongFromDouble(50.0) + ); + List metricsWithField = List.of(0, 1, 2, 3, 4, 5); + + compositeField = getStarTreeFieldWithKeywordField(); + SortedSetStarTreeValuesIterator d1sndv = new SortedSetStarTreeValuesIterator(getSortedSetMock(dimList, docsWithField)); + SortedSetStarTreeValuesIterator d2sndv = new SortedSetStarTreeValuesIterator(getSortedSetMock(dimList2, docsWithField2)); + SortedNumericStarTreeValuesIterator m1sndv = new SortedNumericStarTreeValuesIterator( + getSortedNumericMock(metricsList, metricsWithField) + ); + SortedNumericStarTreeValuesIterator m2sndv = new SortedNumericStarTreeValuesIterator( + getSortedNumericMock(metricsList, metricsWithField) + ); + + writeState = getWriteState(6, writeState.segmentInfo.getId()); + builder = getStarTreeBuilder(metaOut, dataOut, compositeField, writeState, mapperService); + SequentialDocValuesIterator[] dimDvs = { new SequentialDocValuesIterator(d1sndv), new SequentialDocValuesIterator(d2sndv) }; + Iterator starTreeDocumentIterator = builder.sortAndAggregateSegmentDocuments( + dimDvs, + List.of(new SequentialDocValuesIterator(m1sndv), new SequentialDocValuesIterator(m2sndv)) + ); + /** + * Asserting following dim / metrics [ dim1, dim2 / Sum [metric], count [metric] ] + [0, 0] | [0.0, 1] + [1, 1] | [10.0, 1] + [2, 2] | [20.0, 1] + [3, 3] | [30.0, 1] + [4, 4] | [40.0, 1] + [5, 5] | [50.0, 1] + */ + + SegmentWriteState w = getWriteState(DocIdSetIterator.NO_MORE_DOCS, writeState.segmentInfo.getId()); + this.docValuesConsumer = LuceneDocValuesConsumerFactory.getDocValuesConsumerForCompositeCodec( + w, + Composite912DocValuesFormat.DATA_DOC_VALUES_CODEC, + Composite912DocValuesFormat.DATA_DOC_VALUES_EXTENSION, + Composite912DocValuesFormat.META_DOC_VALUES_CODEC, + Composite912DocValuesFormat.META_DOC_VALUES_EXTENSION + ); + Map dv = new LinkedHashMap<>(); + dv.put("field1", getSortedSetMock(dimList, docsWithField)); + dv.put("field3", getSortedSetMock(dimList2, docsWithField2)); + builder.setFlushSortedSetDocValuesMap(dv); + builder.build(starTreeDocumentIterator, new AtomicInteger(), docValuesConsumer); + + List starTreeDocuments = builder.getStarTreeDocuments(); + int count = 0; + for (StarTreeDocument starTreeDocument : starTreeDocuments) { + count++; + if (starTreeDocument.dimensions[1] != null) { + assertEquals( + starTreeDocument.dimensions[0] == null + ? 
starTreeDocument.dimensions[1] * 1 * 10.0 + : starTreeDocument.dimensions[0] * 10, + starTreeDocument.metrics[0] + ); + assertEquals(1L, starTreeDocument.metrics[1]); + } else { + assertEquals(150D, starTreeDocument.metrics[0]); + assertEquals(6L, starTreeDocument.metrics[1]); + } + } + assertEquals(13, count); + validateStarTree(builder.getRootNode(), 2, 1000, builder.getStarTreeDocuments()); + + metaOut.close(); + dataOut.close(); + docValuesConsumer.close(); + LinkedHashMap docValues = new LinkedHashMap<>(); + docValues.put("field1", DocValuesType.SORTED_SET); + docValues.put("field3", DocValuesType.SORTED_SET); + StarTreeMetadata starTreeMetadata = new StarTreeMetadata( + "sf", + STAR_TREE, + mock(IndexInput.class), + VERSION_CURRENT, + builder.numStarTreeNodes, + docValues, + List.of(new Metric("field2", List.of(MetricStat.SUM, MetricStat.VALUE_COUNT, MetricStat.AVG))), + 6, + builder.numStarTreeDocs, + 1000, + Set.of(), + getBuildMode(), + 0, + 264 + ); + + validateStarTreeFileFormats( + builder.getRootNode(), + builder.getStarTreeDocuments().size(), + starTreeMetadata, + builder.getStarTreeDocuments() + ); + + } + private StarTreeField getStarTreeFieldWithMultipleMetrics() { Dimension d1 = new NumericDimension("field1"); Dimension d2 = new NumericDimension("field3"); @@ -408,6 +531,18 @@ private StarTreeField getStarTreeFieldWithMultipleMetrics() { return new StarTreeField("sf", dims, metrics, c); } + private StarTreeField getStarTreeFieldWithKeywordField() { + Dimension d1 = new KeywordDimension("field1"); + Dimension d2 = new KeywordDimension("field3"); + Metric m1 = new Metric("field2", List.of(MetricStat.SUM)); + Metric m2 = new Metric("field2", List.of(MetricStat.VALUE_COUNT)); + Metric m3 = new Metric("field2", List.of(MetricStat.AVG)); + List dims = List.of(d1, d2); + List metrics = List.of(m1, m2, m3); + StarTreeFieldConfiguration c = new StarTreeFieldConfiguration(1000, new HashSet<>(), getBuildMode()); + return new StarTreeField("sf", dims, metrics, c); + } + private static DocValuesProducer getDocValuesProducer(SortedNumericDocValues sndv) { return new EmptyDocValuesProducer() { @Override diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderMergeFlowTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderMergeFlowTests.java index 00e53534a7606..be16961e781db 100644 --- a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderMergeFlowTests.java +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderMergeFlowTests.java @@ -9,8 +9,10 @@ package org.opensearch.index.compositeindex.datacube.startree.builder; import org.apache.lucene.index.DocValues; +import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.SegmentWriteState; import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.search.DocIdSetIterator; import org.opensearch.common.settings.Settings; import org.opensearch.index.codec.composite.LuceneDocValuesConsumerFactory; @@ -26,6 +28,7 @@ import org.opensearch.index.compositeindex.datacube.startree.fileformats.meta.StarTreeMetadata; import org.opensearch.index.compositeindex.datacube.startree.index.StarTreeValues; import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.SortedNumericStarTreeValuesIterator; +import 
org.opensearch.index.compositeindex.datacube.startree.utils.iterator.SortedSetStarTreeValuesIterator; import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.StarTreeValuesIterator; import org.opensearch.index.mapper.ContentPath; import org.opensearch.index.mapper.DocumentMapper; @@ -49,6 +52,7 @@ import static org.opensearch.index.compositeindex.CompositeIndexConstants.SEGMENT_DOCS_COUNT; import static org.opensearch.index.compositeindex.datacube.startree.builder.BuilderTestsUtils.getSortedNumericMock; +import static org.opensearch.index.compositeindex.datacube.startree.builder.BuilderTestsUtils.getSortedSetMock; import static org.opensearch.index.compositeindex.datacube.startree.builder.BuilderTestsUtils.traverseStarTree; import static org.opensearch.index.compositeindex.datacube.startree.builder.BuilderTestsUtils.validateStarTree; import static org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeUtils.fullyQualifiedFieldNameForStarTreeMetricsDocValues; @@ -348,8 +352,10 @@ public void testMergeFlowWithSum() throws IOException { metaOut.close(); dataOut.close(); docValuesConsumer.close(); - - StarTreeMetadata starTreeMetadata = getStarTreeMetadata(List.of("field1", "field3"), 6, 1000, 264); + LinkedHashMap map = new LinkedHashMap<>(); + map.put("field1", DocValuesType.SORTED_NUMERIC); + map.put("field3", DocValuesType.SORTED_NUMERIC); + StarTreeMetadata starTreeMetadata = getStarTreeMetadata(map, 6, 1000, 264); validateStarTreeFileFormats( builder.getRootNode(), @@ -421,8 +427,10 @@ public void testMergeFlowWithCount() throws IOException { metaOut.close(); dataOut.close(); docValuesConsumer.close(); - - StarTreeMetadata starTreeMetadata = getStarTreeMetadata(List.of("field1", "field3"), 6, 1000, 264); + LinkedHashMap map = new LinkedHashMap<>(); + map.put("field1", DocValuesType.SORTED_NUMERIC); + map.put("field3", DocValuesType.SORTED_NUMERIC); + StarTreeMetadata starTreeMetadata = getStarTreeMetadata(map, 6, 1000, 264); validateStarTreeFileFormats( builder.getRootNode(), @@ -568,8 +576,10 @@ public void testMergeFlowWithMissingDocs() throws IOException { metaOut.close(); dataOut.close(); docValuesConsumer.close(); - - StarTreeMetadata starTreeMetadata = getStarTreeMetadata(List.of("field1", "field3"), 10, 1000, 363); + LinkedHashMap map = new LinkedHashMap<>(); + map.put("field1", DocValuesType.SORTED_NUMERIC); + map.put("field3", DocValuesType.SORTED_NUMERIC); + StarTreeMetadata starTreeMetadata = getStarTreeMetadata(map, 10, 1000, 363); validateStarTreeFileFormats( builder.getRootNode(), @@ -656,8 +666,10 @@ public void testMergeFlowWithMissingDocsWithZero() throws IOException { metaOut.close(); dataOut.close(); docValuesConsumer.close(); - - StarTreeMetadata starTreeMetadata = getStarTreeMetadata(List.of("field1", "field3"), 6, 1000, 231); + LinkedHashMap map = new LinkedHashMap<>(); + map.put("field1", DocValuesType.SORTED_NUMERIC); + map.put("field3", DocValuesType.SORTED_NUMERIC); + StarTreeMetadata starTreeMetadata = getStarTreeMetadata(map, 6, 1000, 231); validateStarTreeFileFormats( builder.getRootNode(), @@ -747,8 +759,10 @@ public void testMergeFlowWithMissingDocsWithZeroComplexCase() throws IOException metaOut.close(); dataOut.close(); docValuesConsumer.close(); - - StarTreeMetadata starTreeMetadata = getStarTreeMetadata(List.of("field1", "field3"), 7, 1000, 231); + LinkedHashMap map = new LinkedHashMap<>(); + map.put("field1", DocValuesType.SORTED_NUMERIC); + map.put("field3", DocValuesType.SORTED_NUMERIC); + StarTreeMetadata 
starTreeMetadata = getStarTreeMetadata(map, 7, 1000, 231); validateStarTreeFileFormats( builder.getRootNode(), @@ -834,8 +848,10 @@ public void testMergeFlowWithMissingDocsInSecondDim() throws IOException { metaOut.close(); dataOut.close(); docValuesConsumer.close(); - - StarTreeMetadata starTreeMetadata = getStarTreeMetadata(List.of("field1", "field3"), 10, 1000, 363); + LinkedHashMap map = new LinkedHashMap<>(); + map.put("field1", DocValuesType.SORTED_NUMERIC); + map.put("field3", DocValuesType.SORTED_NUMERIC); + StarTreeMetadata starTreeMetadata = getStarTreeMetadata(map, 10, 1000, 363); validateStarTreeFileFormats( builder.getRootNode(), @@ -919,8 +935,10 @@ public void testMergeFlowWithDocsMissingAtTheEnd() throws IOException { metaOut.close(); dataOut.close(); docValuesConsumer.close(); - - StarTreeMetadata starTreeMetadata = getStarTreeMetadata(List.of("field1", "field3"), 10, 1000, 363); + LinkedHashMap map = new LinkedHashMap<>(); + map.put("field1", DocValuesType.SORTED_NUMERIC); + map.put("field3", DocValuesType.SORTED_NUMERIC); + StarTreeMetadata starTreeMetadata = getStarTreeMetadata(map, 10, 1000, 363); validateStarTreeFileFormats( builder.getRootNode(), @@ -992,8 +1010,10 @@ public void testMergeFlowWithEmptyFieldsInOneSegment() throws IOException { metaOut.close(); dataOut.close(); docValuesConsumer.close(); - - StarTreeMetadata starTreeMetadata = getStarTreeMetadata(List.of("field1", "field3"), 6, 1000, 264); + LinkedHashMap map = new LinkedHashMap<>(); + map.put("field1", DocValuesType.SORTED_NUMERIC); + map.put("field3", DocValuesType.SORTED_NUMERIC); + StarTreeMetadata starTreeMetadata = getStarTreeMetadata(map, 6, 1000, 264); validateStarTreeFileFormats( builder.getRootNode(), @@ -1391,8 +1411,10 @@ public void testMergeFlowWithDifferentDocsFromSegments() throws IOException { metaOut.close(); dataOut.close(); docValuesConsumer.close(); - - StarTreeMetadata starTreeMetadata = getStarTreeMetadata(List.of("field1", "field3"), 9, 1000, 330); + LinkedHashMap map = new LinkedHashMap<>(); + map.put("field1", DocValuesType.SORTED_NUMERIC); + map.put("field3", DocValuesType.SORTED_NUMERIC); + StarTreeMetadata starTreeMetadata = getStarTreeMetadata(map, 9, 1000, 330); validateStarTreeFileFormats( builder.getRootNode(), @@ -1776,6 +1798,110 @@ public void testMergeFlowWithTimestamps() throws IOException { ); } + public void testMergeFlowWithKeywords() throws IOException { + List dimList = List.of(0L, 1L, 2L, 3L, 4L, 5L, 6L); + List docsWithField = List.of(0, 1, 2, 3, 4, 5, 6); + List dimList2 = List.of(0L, 1L, 2L, 3L, 4L, 5L, -1L); + List docsWithField2 = List.of(0, 1, 2, 3, 4, 5, 6); + List metricsList1 = List.of( + getLongFromDouble(0.0), + getLongFromDouble(10.0), + getLongFromDouble(20.0), + getLongFromDouble(30.0), + getLongFromDouble(40.0), + getLongFromDouble(50.0), + getLongFromDouble(60.0) + ); + List metricsWithField1 = List.of(0, 1, 2, 3, 4, 5, 6); + List metricsList = List.of(0L, 1L, 2L, 3L, 4L, 5L, 6L); + List metricsWithField = List.of(0, 1, 2, 3, 4, 5, 6); + + List dimList3 = List.of(0L, 1L, 2L, 3L, -1L); + List docsWithField3 = List.of(0, 1, 2, 3, 4); + List dimList4 = List.of(0L, 1L, 2L, 3L, -1L); + List docsWithField4 = List.of(0, 1, 2, 3, 4); + List metricsList21 = List.of( + getLongFromDouble(0.0), + getLongFromDouble(10.0), + getLongFromDouble(20.0), + getLongFromDouble(30.0), + getLongFromDouble(40.0) + ); + List metricsWithField21 = List.of(0, 1, 2, 3, 4); + List metricsList2 = List.of(0L, 1L, 2L, 3L, 4L); + List metricsWithField2 = List.of(0, 1, 2, 3, 4); 
+ + compositeField = getStarTreeFieldWithKeywords(); + StarTreeValues starTreeValues = getStarTreeValuesWithKeywords( + getSortedSetMock(dimList, docsWithField), + getSortedSetMock(dimList2, docsWithField2), + getSortedNumericMock(metricsList, metricsWithField), + getSortedNumericMock(metricsList1, metricsWithField1), + compositeField, + "6" + ); + + StarTreeValues starTreeValues2 = getStarTreeValuesWithKeywords( + getSortedSetMock(dimList3, docsWithField3), + getSortedSetMock(dimList4, docsWithField4), + getSortedNumericMock(metricsList2, metricsWithField2), + getSortedNumericMock(metricsList21, metricsWithField21), + compositeField, + "4" + ); + this.docValuesConsumer = LuceneDocValuesConsumerFactory.getDocValuesConsumerForCompositeCodec( + writeState, + Composite912DocValuesFormat.DATA_DOC_VALUES_CODEC, + Composite912DocValuesFormat.DATA_DOC_VALUES_EXTENSION, + Composite912DocValuesFormat.META_DOC_VALUES_CODEC, + Composite912DocValuesFormat.META_DOC_VALUES_EXTENSION + ); + builder = getStarTreeBuilder(metaOut, dataOut, compositeField, getWriteState(4, writeState.segmentInfo.getId()), mapperService); + // Initialize the mock MergeState within the method + + Iterator starTreeDocumentIterator = builder.mergeStarTrees(List.of(starTreeValues, starTreeValues2)); + /** + [0, 0] | [5, 50.0] + [1, 1] | [7, 70.0] + [2, 2] | [9, 90.0] + [3, 3] | [11, 110.0] + [4, 4] | [4, 40.0] + [5, 5] | [5, 50.0] + */ + int count = 0; + builder.appendDocumentsToStarTree(starTreeDocumentIterator); + for (StarTreeDocument starTreeDocument : builder.getStarTreeDocuments()) { + count++; + if (count <= 4) { + assertEquals(starTreeDocument.dimensions[0] * 2, (long) starTreeDocument.metrics[0], 0); + assertEquals(starTreeDocument.dimensions[0] * 20.0, (double) starTreeDocument.metrics[1], 0); + } else { + assertEquals(starTreeDocument.dimensions[0], (long) starTreeDocument.metrics[0], 0); + assertEquals(starTreeDocument.dimensions[0] * 10.0, (double) starTreeDocument.metrics[1], 0); + } + } + assertEquals(6, count); + builder.build(starTreeDocumentIterator, new AtomicInteger(), docValuesConsumer); + validateStarTree(builder.getRootNode(), 4, 10, builder.getStarTreeDocuments()); + metaOut.close(); + dataOut.close(); + docValuesConsumer.close(); + + StarTreeMetadata starTreeMetadata = getStarTreeMetadata( + getStarTreeDimensionNames(compositeField.getDimensionsOrder()), + 6, + compositeField.getStarTreeConfig().maxLeafDocs(), + 264 + ); + + validateStarTreeFileFormats( + builder.getRootNode(), + builder.getStarTreeDocuments().size(), + starTreeMetadata, + builder.getStarTreeDocuments() + ); + } + private StarTreeValues getStarTreeValuesWithDates( SortedNumericDocValues dimList, SortedNumericDocValues dimList2, @@ -1857,6 +1983,93 @@ private StarTreeValues getStarTreeValues( return starTreeValues; } + private StarTreeValues getStarTreeValuesWithKeywords( + SortedSetDocValues dimList, + SortedSetDocValues dimList2, + SortedNumericDocValues metricsList, + SortedNumericDocValues metricsList1, + StarTreeField sf, + String number + ) { + SortedSetDocValues d1sndv = dimList; + SortedSetDocValues d2sndv = dimList2; + SortedNumericDocValues m1sndv = metricsList; + Map> dimDocIdSetIterators = Map.of( + "field1", + () -> new SortedSetStarTreeValuesIterator(d1sndv), + "field3", + () -> new SortedSetStarTreeValuesIterator(d2sndv) + ); + + Map> metricDocIdSetIterators = new LinkedHashMap<>(); + metricDocIdSetIterators.put( + fullyQualifiedFieldNameForStarTreeMetricsDocValues( + sf.getName(), + "field2", + 
sf.getMetrics().get(0).getMetrics().get(0).getTypeName() + ), + () -> new SortedNumericStarTreeValuesIterator(metricsList) + ); + metricDocIdSetIterators.put( + fullyQualifiedFieldNameForStarTreeMetricsDocValues( + sf.getName(), + "field2", + sf.getMetrics().get(0).getMetrics().get(1).getTypeName() + ), + () -> new SortedNumericStarTreeValuesIterator(metricsList1) + ); + StarTreeValues starTreeValues = new StarTreeValues( + sf, + null, + dimDocIdSetIterators, + metricDocIdSetIterators, + Map.of(CompositeIndexConstants.SEGMENT_DOCS_COUNT, number), + null + ); + return starTreeValues; + } + + private StarTreeValues getStarTreeValuesWithKeywords( + SortedSetDocValues dimList, + SortedSetDocValues dimList2, + SortedSetDocValues dimList4, + SortedSetDocValues dimList3, + SortedNumericDocValues metricsList, + SortedNumericDocValues metricsList1, + StarTreeField sf, + String number + ) { + Map> dimDocIdSetIterators = Map.of( + "field1_minute", + () -> new SortedSetStarTreeValuesIterator(dimList), + "field1_half-hour", + () -> new SortedSetStarTreeValuesIterator(dimList4), + "field1_hour", + () -> new SortedSetStarTreeValuesIterator(dimList2), + "field3", + () -> new SortedSetStarTreeValuesIterator(dimList3) + ); + Map> metricDocIdSetIterators = new LinkedHashMap<>(); + + metricDocIdSetIterators.put( + fullyQualifiedFieldNameForStarTreeMetricsDocValues( + sf.getName(), + "field2", + sf.getMetrics().get(0).getMetrics().get(0).getTypeName() + ), + () -> new SortedNumericStarTreeValuesIterator(metricsList) + ); + metricDocIdSetIterators.put( + fullyQualifiedFieldNameForStarTreeMetricsDocValues( + sf.getName(), + "field2", + sf.getMetrics().get(0).getMetrics().get(1).getTypeName() + ), + () -> new SortedNumericStarTreeValuesIterator(metricsList1) + ); + return new StarTreeValues(sf, null, dimDocIdSetIterators, metricDocIdSetIterators, Map.of(SEGMENT_DOCS_COUNT, number), null); + } + private StarTreeValues getStarTreeValues( List dimList1, List docsWithField1, diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderTestCase.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderTestCase.java index 6733cac12f657..9c9beaea4f52c 100644 --- a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderTestCase.java +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderTestCase.java @@ -16,6 +16,7 @@ import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.IndexFileNames; import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.MergeState; import org.apache.lucene.index.SegmentReadState; import org.apache.lucene.index.SegmentWriteState; import org.apache.lucene.index.VectorEncoding; @@ -31,6 +32,7 @@ import org.opensearch.index.compositeindex.datacube.DataCubeDateTimeUnit; import org.opensearch.index.compositeindex.datacube.DateDimension; import org.opensearch.index.compositeindex.datacube.Dimension; +import org.opensearch.index.compositeindex.datacube.KeywordDimension; import org.opensearch.index.compositeindex.datacube.Metric; import org.opensearch.index.compositeindex.datacube.MetricStat; import org.opensearch.index.compositeindex.datacube.NumericDimension; @@ -60,6 +62,7 @@ import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Set; @@ -87,6 +90,7 @@ public 
abstract class StarTreeBuilderTestCase extends OpenSearchTestCase { protected String dataFileName; protected String metaFileName; protected List dimensionsOrder; + protected MergeState mergeState; public StarTreeBuilderTestCase(StarTreeFieldConfiguration.StarTreeBuildMode buildMode) { this.buildMode = buildMode; @@ -155,6 +159,8 @@ public void setup() throws IOException { } writeState = getWriteState(5, UUID.randomUUID().toString().substring(0, 16).getBytes(StandardCharsets.UTF_8)); + mergeState = new MergeState(null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, false); + dataFileName = IndexFileNames.segmentFileName( writeState.segmentInfo.name, writeState.segmentSuffix, @@ -240,7 +246,7 @@ SegmentWriteState getWriteState(int numDocs, byte[] id) { return BuilderTestsUtils.getWriteState(numDocs, id, fieldsInfo, directory); } - SegmentReadState getReadState(int numDocs, List dimensionFields, List metrics) { + SegmentReadState getReadState(int numDocs, Map dimensionFields, List metrics) { return BuilderTestsUtils.getReadState(numDocs, dimensionFields, metrics, compositeField, writeState, directory); } @@ -248,10 +254,12 @@ protected Map getAttributes(int numSegmentDocs) { return Map.of(CompositeIndexConstants.SEGMENT_DOCS_COUNT, String.valueOf(numSegmentDocs)); } - protected List getStarTreeDimensionNames(List dimensionsOrder) { - List dimensionNames = new ArrayList<>(); + protected LinkedHashMap getStarTreeDimensionNames(List dimensionsOrder) { + LinkedHashMap dimensionNames = new LinkedHashMap<>(); for (Dimension dimension : dimensionsOrder) { - dimensionNames.addAll(dimension.getSubDimensionNames()); + for (String dimensionName : dimension.getSubDimensionNames()) { + dimensionNames.put(dimensionName, dimension.getDocValuesType()); + } } return dimensionNames; } @@ -320,7 +328,12 @@ protected long getLongFromDouble(double value) { return NumericUtils.doubleToSortableLong(value); } - protected StarTreeMetadata getStarTreeMetadata(List fields, int segmentAggregatedDocCount, int maxLeafDocs, int dataLength) { + protected StarTreeMetadata getStarTreeMetadata( + LinkedHashMap fields, + int segmentAggregatedDocCount, + int maxLeafDocs, + int dataLength + ) { return new StarTreeMetadata( "sf", STAR_TREE, @@ -339,6 +352,17 @@ protected StarTreeMetadata getStarTreeMetadata(List fields, int segmentA ); } + protected StarTreeField getStarTreeFieldWithKeywords() { + Dimension d1 = new KeywordDimension("field1"); + Dimension d2 = new KeywordDimension("field3"); + Metric m1 = new Metric("field2", List.of(MetricStat.VALUE_COUNT, MetricStat.SUM)); + List dims = List.of(d1, d2); + List metrics = List.of(m1); + StarTreeFieldConfiguration c = new StarTreeFieldConfiguration(10, new HashSet<>(), getBuildMode()); + StarTreeField sf = new StarTreeField("sf", dims, metrics, c); + return sf; + } + protected StarTreeField getStarTreeFieldWithDateDimension() { List intervals = new ArrayList<>(); intervals.add(new DateTimeUnitAdapter(Rounding.DateTimeUnit.MINUTES_OF_HOUR)); diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/fileformats/meta/StarTreeMetadataTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/fileformats/meta/StarTreeMetadataTests.java index c8636426449ad..cc91d69be97c1 100644 --- a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/fileformats/meta/StarTreeMetadataTests.java +++ 
b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/fileformats/meta/StarTreeMetadataTests.java @@ -42,6 +42,7 @@ import java.util.Collections; import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.Set; import java.util.UUID; @@ -181,8 +182,11 @@ public void test_starTreeMetadata() throws IOException { assertEquals(starTreeMetadata.getNumberOfNodes(), numberOfNodes); assertNotNull(starTreeMetadata); - for (int i = 0; i < dimensionsOrder.size(); i++) { - assertEquals(dimensionsOrder.get(i).getField(), starTreeMetadata.getDimensionFields().get(i)); + assertEquals(dimensionsOrder.size(), starTreeMetadata.dimensionFieldsToDocValuesMap.size()); + int k = 0; + for (Map.Entry entry : starTreeMetadata.dimensionFieldsToDocValuesMap.entrySet()) { + assertEquals(dimensionsOrder.get(k).getField(), entry.getKey()); + k++; } assertEquals(starTreeField.getMetrics().size(), starTreeMetadata.getMetrics().size()); diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeUtilsTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeUtilsTests.java index 9cca0b04e9ea4..7e438c18d9ab9 100644 --- a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeUtilsTests.java +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeUtilsTests.java @@ -17,6 +17,7 @@ import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.UUID; @@ -43,7 +44,7 @@ public void testFullyQualifiedFieldNameForStarTreeMetricsDocValues() { public void testGetFieldInfoList() { List fieldNames = Arrays.asList("field1", "field2", "field3"); - FieldInfo[] actualFieldInfos = StarTreeUtils.getFieldInfoList(fieldNames); + FieldInfo[] actualFieldInfos = StarTreeUtils.getFieldInfoList(fieldNames, new HashMap<>()); for (int i = 0; i < fieldNames.size(); i++) { assertFieldInfos(actualFieldInfos[i], fieldNames.get(i), i); } diff --git a/server/src/test/java/org/opensearch/index/mapper/ObjectMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/ObjectMapperTests.java index c35cf3fc1e591..77534b514a59a 100644 --- a/server/src/test/java/org/opensearch/index/mapper/ObjectMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/ObjectMapperTests.java @@ -540,9 +540,14 @@ public void testCompositeFields() throws Exception { .endObject() .toString(); + Settings settings = Settings.builder() + .put(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey(), true) + .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(512, ByteSizeUnit.MB)) + .build(); + IllegalArgumentException ex = expectThrows( IllegalArgumentException.class, - () -> createIndex("invalid").mapperService().documentMapperParser().parse("tweet", new CompressedXContent(mapping)) + () -> createIndex("invalid", settings).mapperService().documentMapperParser().parse("tweet", new CompressedXContent(mapping)) ); assertEquals( "star tree index is under an experimental feature and can be activated only by enabling opensearch.experimental.feature.composite_index.star_tree.enabled feature flag in the JVM options", @@ -552,10 +557,6 @@ public void testCompositeFields() throws Exception { final Settings starTreeEnabledSettings = Settings.builder().put(STAR_TREE_INDEX, "true").build(); FeatureFlags.initializeFeatureFlags(starTreeEnabledSettings); - Settings settings 
= Settings.builder() - .put(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey(), true) - .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(512, ByteSizeUnit.MB)) - .build(); DocumentMapper documentMapper = createIndex("test", settings).mapperService() .documentMapperParser() .parse("tweet", new CompressedXContent(mapping)); diff --git a/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java index aac460bd5e332..8ec34b3eb660c 100644 --- a/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java @@ -672,6 +672,9 @@ private XContentBuilder getExpandedMappingWithJustAvg(String dim, String metric) b.startObject("size"); b.field("type", "integer"); b.endObject(); + b.startObject("keyword1"); + b.field("type", "keyword"); + b.endObject(); b.endObject(); }); } @@ -718,6 +721,7 @@ private XContentBuilder getMappingWithDuplicateFields(boolean isDuplicateDim, bo .field("type", "integer") .field("doc_values", true) .endObject() + .endObject() .endObject(); } catch (IOException e) { @@ -772,6 +776,9 @@ private XContentBuilder getExpandedMappingWithJustSum(String dim, String metric) b.startObject("size"); b.field("type", "integer"); b.endObject(); + b.startObject("keyword1"); + b.field("type", "keyword"); + b.endObject(); b.endObject(); }); } @@ -823,6 +830,9 @@ private XContentBuilder getExpandedMappingWithSumAndCount(String dim, String met b.startObject("size"); b.field("type", "integer"); b.endObject(); + b.startObject("keyword1"); + b.field("type", "keyword"); + b.endObject(); b.endObject(); }); } @@ -866,6 +876,9 @@ private XContentBuilder getMinMappingWithDateDims(boolean calendarIntervalsExcee b.startObject(); b.field("name", "metric_field"); b.endObject(); + b.startObject(); + b.field("name", "keyword1"); + b.endObject(); } b.endArray(); @@ -895,6 +908,9 @@ private XContentBuilder getMinMappingWithDateDims(boolean calendarIntervalsExcee b.startObject("metric_field"); b.field("type", "integer"); b.endObject(); + b.startObject("keyword1"); + b.field("type", "keyword"); + b.endObject(); b.endObject(); }); @@ -920,6 +936,9 @@ private XContentBuilder getMinMapping( b.startObject(); b.field("name", "status"); b.endObject(); + b.startObject(); + b.field("name", "keyword1"); + b.endObject(); b.endArray(); } if (!isEmptyMetrics) { @@ -951,6 +970,9 @@ private XContentBuilder getMinMapping( b.field("type", "integer"); b.endObject(); } + b.startObject("keyword1"); + b.field("type", "keyword"); + b.endObject(); b.endObject(); }); } @@ -1018,7 +1040,9 @@ private XContentBuilder getMinMappingWith2StarTrees() throws IOException { b.startObject("metric_field"); b.field("type", "integer"); b.endObject(); - + b.startObject("keyword1"); + b.field("type", "keyword"); + b.endObject(); b.endObject(); }); } @@ -1058,6 +1082,9 @@ private XContentBuilder getInvalidMapping( b.startObject(); b.field("name", "status"); b.endObject(); + b.startObject(); + b.field("name", "keyword1"); + b.endObject(); } b.endArray(); b.startArray("metrics"); @@ -1090,7 +1117,7 @@ private XContentBuilder getInvalidMapping( if (!invalidDimType) { b.field("type", "integer"); } else { - b.field("type", "keyword"); + b.field("type", "ip"); } b.endObject(); b.startObject("metric_field"); @@ -1100,6 +1127,9 @@ private XContentBuilder getInvalidMapping( b.field("type", "integer"); } b.endObject(); + 
b.startObject("keyword1"); + b.field("type", "keyword"); + b.endObject(); b.endObject(); }); } @@ -1132,6 +1162,9 @@ private XContentBuilder getInvalidMappingWithDv( b.startObject(); b.field("name", "status"); b.endObject(); + b.startObject(); + b.field("name", "keyword1"); + b.endObject(); } b.endArray(); b.startArray("metrics"); @@ -1168,6 +1201,9 @@ private XContentBuilder getInvalidMappingWithDv( b.field("doc_values", "true"); } b.endObject(); + b.startObject("keyword1"); + b.field("type", "keyword"); + b.endObject(); b.endObject(); }); } @@ -1224,6 +1260,9 @@ public void testEmptyName() { b.startObject("status"); b.field("type", "integer"); b.endObject(); + b.startObject("keyword1"); + b.field("type", "keyword"); + b.endObject(); b.endObject(); }))); assertThat(e.getMessage(), containsString("name cannot be empty string")); diff --git a/server/src/test/java/org/opensearch/search/SearchServiceStarTreeTests.java b/server/src/test/java/org/opensearch/search/SearchServiceStarTreeTests.java index 0c88154ca2b38..3b32e9e4ac6b7 100644 --- a/server/src/test/java/org/opensearch/search/SearchServiceStarTreeTests.java +++ b/server/src/test/java/org/opensearch/search/SearchServiceStarTreeTests.java @@ -17,7 +17,6 @@ import org.opensearch.core.common.Strings; import org.opensearch.index.IndexService; import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; -import org.opensearch.index.codec.composite912.datacube.startree.StarTreeDocValuesFormatTests; import org.opensearch.index.compositeindex.CompositeIndexSettings; import org.opensearch.index.compositeindex.datacube.startree.StarTreeIndexSettings; import org.opensearch.index.mapper.CompositeMappedFieldType; @@ -26,6 +25,7 @@ import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.IndicesService; import org.opensearch.search.aggregations.AggregationBuilders; +import org.opensearch.search.aggregations.startree.StarTreeFilterTests; import org.opensearch.search.builder.SearchSourceBuilder; import org.opensearch.search.internal.AliasFilter; import org.opensearch.search.internal.ReaderContext; @@ -55,7 +55,7 @@ public void testParseQueryToOriginalOrStarTreeQuery() throws IOException { .indices() .prepareCreate("test") .setSettings(settings) - .setMapping(StarTreeDocValuesFormatTests.getExpandedMapping()); + .setMapping(StarTreeFilterTests.getExpandedMapping(1, false)); createIndex("test", builder); IndicesService indicesService = getInstanceFromNode(IndicesService.class); diff --git a/server/src/test/java/org/opensearch/search/aggregations/startree/MetricAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/startree/MetricAggregatorTests.java index 0327bd9990784..12e83cbbadd5d 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/startree/MetricAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/startree/MetricAggregatorTests.java @@ -90,7 +90,7 @@ protected Codec getCodec() { final Logger testLogger = LogManager.getLogger(MetricAggregatorTests.class); MapperService mapperService; try { - mapperService = StarTreeDocValuesFormatTests.createMapperService(StarTreeDocValuesFormatTests.getExpandedMapping()); + mapperService = StarTreeDocValuesFormatTests.createMapperService(StarTreeFilterTests.getExpandedMapping(1, false)); } catch (IOException e) { throw new RuntimeException(e); } diff --git a/server/src/test/java/org/opensearch/search/aggregations/startree/StarTreeFilterTests.java 
b/server/src/test/java/org/opensearch/search/aggregations/startree/StarTreeFilterTests.java index f8eb71a40319a..b03cb5ac7bb9d 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/startree/StarTreeFilterTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/startree/StarTreeFilterTests.java @@ -47,7 +47,7 @@ import java.util.List; import java.util.Map; -import static org.opensearch.index.codec.composite912.datacube.startree.StarTreeDocValuesFormatTests.topMapping; +import static org.opensearch.index.codec.composite912.datacube.startree.AbstractStarTreeDVFormatTests.topMapping; public class StarTreeFilterTests extends AggregatorTestCase { From e9f77e38998bf42a17e34392bfa6683b1ec837d6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 12 Nov 2024 16:12:52 +0800 Subject: [PATCH 26/30] Bump org.apache.xmlbeans:xmlbeans from 5.2.1 to 5.2.2 in /plugins/ingest-attachment (#16612) * Bump org.apache.xmlbeans:xmlbeans in /plugins/ingest-attachment Bumps org.apache.xmlbeans:xmlbeans from 5.2.1 to 5.2.2. --- updated-dependencies: - dependency-name: org.apache.xmlbeans:xmlbeans dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Signed-off-by: gaobinlong Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] Co-authored-by: gaobinlong --- CHANGELOG.md | 1 + plugins/ingest-attachment/build.gradle | 2 +- plugins/ingest-attachment/licenses/xmlbeans-5.2.1.jar.sha1 | 1 - plugins/ingest-attachment/licenses/xmlbeans-5.2.2.jar.sha1 | 1 + 4 files changed, 3 insertions(+), 2 deletions(-) delete mode 100644 plugins/ingest-attachment/licenses/xmlbeans-5.2.1.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/xmlbeans-5.2.2.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 30b1d5908c1a7..e8dd188709cd2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,6 +22,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.google.apis:google-api-services-compute` from v1-rev20240407-2.0.0 to v1-rev20241021-2.0.0 ([#16502](https://github.com/opensearch-project/OpenSearch/pull/16502), [#16548](https://github.com/opensearch-project/OpenSearch/pull/16548)) - Bump `com.azure:azure-storage-blob` from 12.23.0 to 12.28.1 ([#16501](https://github.com/opensearch-project/OpenSearch/pull/16501)) - Bump `org.apache.hadoop:hadoop-minicluster` from 3.4.0 to 3.4.1 ([#16550](https://github.com/opensearch-project/OpenSearch/pull/16550)) +- Bump `org.apache.xmlbeans:xmlbeans` from 5.2.1 to 5.2.2 ([#16612](https://github.com/opensearch-project/OpenSearch/pull/16612)) - Bump `com.nimbusds:nimbus-jose-jwt` from 9.41.1 to 9.46 ([#16611](https://github.com/opensearch-project/OpenSearch/pull/16611)) - Bump `lycheeverse/lychee-action` from 2.0.2 to 2.1.0 ([#16610](https://github.com/opensearch-project/OpenSearch/pull/16610)) - Bump `me.champeau.gradle.japicmp` from 0.4.4 to 0.4.5 ([#16614](https://github.com/opensearch-project/OpenSearch/pull/16614)) diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle index 81ac52b97cefa..4f30ea9ea7e22 100644 --- a/plugins/ingest-attachment/build.gradle +++ b/plugins/ingest-attachment/build.gradle @@ -89,7 +89,7 @@ dependencies { api "org.apache.poi:poi:${versions.poi}" 
api "org.apache.poi:poi-ooxml-lite:${versions.poi}" api "commons-codec:commons-codec:${versions.commonscodec}" - api 'org.apache.xmlbeans:xmlbeans:5.2.1' + api 'org.apache.xmlbeans:xmlbeans:5.2.2' api 'org.apache.commons:commons-collections4:4.4' // MS Office api "org.apache.poi:poi-scratchpad:${versions.poi}" diff --git a/plugins/ingest-attachment/licenses/xmlbeans-5.2.1.jar.sha1 b/plugins/ingest-attachment/licenses/xmlbeans-5.2.1.jar.sha1 deleted file mode 100644 index eaab556163e5c..0000000000000 --- a/plugins/ingest-attachment/licenses/xmlbeans-5.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e16ddf17fe181c202b097e0dcc0ee2fed91cb7da \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/xmlbeans-5.2.2.jar.sha1 b/plugins/ingest-attachment/licenses/xmlbeans-5.2.2.jar.sha1 new file mode 100644 index 0000000000000..613c1028dbd6d --- /dev/null +++ b/plugins/ingest-attachment/licenses/xmlbeans-5.2.2.jar.sha1 @@ -0,0 +1 @@ +586ffe10ae9864e19e85c24bd060790a70586f72 \ No newline at end of file From 53d41d3fac28c9f72d5883467d5a6211ad09feac Mon Sep 17 00:00:00 2001 From: inpink <108166692+inpink@users.noreply.github.com> Date: Tue, 12 Nov 2024 21:47:36 +0900 Subject: [PATCH 27/30] feat: add vertical scaling and SoftReference for snapshot repository data cache (#16489) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Applies `SoftReference` to cached repository data for efficient memory management under heap pressure. - Enables cache size configuration in `opensearch.yml`, adjustable within a range of 500KB to 1% of heap memory. - Sets the default cache size to `Math.max(ByteSizeUnit.KB.toBytes(500), CACHE_MAX_THRESHOLD / 2)` so it’s generally proportional to heap size. In cases where 1% of the heap is less than 1000KB, indicating a low-memory environment, the default reverts to 500KB as before. - Since `BytesReference` internally uses `byte[]`, the compressed array size is capped at `Integer.MAX_VALUE - 8` to ensure compatibility with JDK limitations on array sizes. Therefore, the maximum cache size cannot exceed this limit. 
Signed-off-by: inpink --- CHANGELOG.md | 1 + .../common/settings/ClusterSettings.java | 1 + .../blobstore/BlobStoreRepository.java | 111 ++++++++++++++++-- .../settings/MemorySizeSettingsTests.java | 63 +++++++++- .../blobstore/BlobStoreRepositoryTests.java | 50 ++++++++ 5 files changed, 209 insertions(+), 17 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e8dd188709cd2..515cf0ce93157 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Make IndexStoreListener a pluggable interface ([#16583](https://github.com/opensearch-project/OpenSearch/pull/16583)) - Support for keyword fields in star-tree index ([#16233](https://github.com/opensearch-project/OpenSearch/pull/16233)) - Add a flag in QueryShardContext to differentiate inner hit query ([#16600](https://github.com/opensearch-project/OpenSearch/pull/16600)) +- Add vertical scaling and SoftReference for snapshot repository data cache ([#16489](https://github.com/opensearch-project/OpenSearch/pull/16489)) ### Dependencies - Bump `com.azure:azure-storage-common` from 12.25.1 to 12.27.1 ([#16521](https://github.com/opensearch-project/OpenSearch/pull/16521)) diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index cac4b3914df5a..c836984655ad1 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -786,6 +786,7 @@ public void apply(Settings value, Settings current, Settings previous) { // Snapshot related Settings BlobStoreRepository.SNAPSHOT_SHARD_PATH_PREFIX_SETTING, BlobStoreRepository.SNAPSHOT_ASYNC_DELETION_ENABLE_SETTING, + BlobStoreRepository.SNAPSHOT_REPOSITORY_DATA_CACHE_THRESHOLD, SearchService.CLUSTER_ALLOW_DERIVED_FIELD_SETTING, diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java index 243d0021fac2e..c1305fa563b16 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java @@ -142,6 +142,7 @@ import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.recovery.RecoveryState; +import org.opensearch.monitor.jvm.JvmInfo; import org.opensearch.node.remotestore.RemoteStorePinnedTimestampService; import org.opensearch.repositories.IndexId; import org.opensearch.repositories.IndexMetaDataGenerations; @@ -167,6 +168,7 @@ import java.io.FilterInputStream; import java.io.IOException; import java.io.InputStream; +import java.lang.ref.SoftReference; import java.nio.file.NoSuchFileException; import java.util.ArrayList; import java.util.Arrays; @@ -196,6 +198,7 @@ import java.util.stream.LongStream; import java.util.stream.Stream; +import static org.opensearch.common.unit.MemorySizeValue.parseBytesSizeValueOrHeapRatio; import static org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm.FNV_1A_COMPOSITE_1; import static org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo.canonicalName; import static org.opensearch.repositories.blobstore.ChecksumBlobStoreFormat.SNAPSHOT_ONLY_FORMAT_PARAMS; @@ -253,6 +256,23 @@ public abstract class BlobStoreRepository extends 
AbstractLifecycleComponent imp */ public static final String VIRTUAL_DATA_BLOB_PREFIX = "v__"; + public static final String SNAPSHOT_REPOSITORY_DATA_CACHET_THRESHOLD_SETTING_NAME = "snapshot.repository_data.cache.threshold"; + + public static final double SNAPSHOT_REPOSITORY_DATA_CACHE_THRESHOLD_DEFAULT_PERCENTAGE = 0.01; + + public static final long CACHE_MIN_THRESHOLD = ByteSizeUnit.KB.toBytes(500); + + public static final long CACHE_MAX_THRESHOLD = calculateMaxSnapshotRepositoryDataCacheThreshold(); + + public static final long CACHE_DEFAULT_THRESHOLD = calculateDefaultSnapshotRepositoryDataCacheThreshold(); + + /** + * Set to Integer.MAX_VALUE - 8 to prevent OutOfMemoryError due to array header requirements, following the limit used in certain JDK versions. + * This ensures compatibility across various JDK versions. For a practical usage example, + * see this link: https://github.com/openjdk/jdk11u/blob/cee8535a9d3de8558b4b5028d68e397e508bef71/src/jdk.zipfs/share/classes/jdk/nio/zipfs/ByteArrayChannel.java#L226 + */ + private static final int MAX_SAFE_ARRAY_SIZE = Integer.MAX_VALUE - 8; + /** * When set to {@code true}, {@link #bestEffortConsistency} will be set to {@code true} and concurrent modifications of the repository * contents will not result in the repository being marked as corrupted. @@ -275,6 +295,58 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp Setting.Property.Deprecated ); + /** + * Sets the cache size for snapshot repository data: the valid range is within 500Kb ... 1% of the node heap memory. + */ + public static final Setting SNAPSHOT_REPOSITORY_DATA_CACHE_THRESHOLD = new Setting<>( + SNAPSHOT_REPOSITORY_DATA_CACHET_THRESHOLD_SETTING_NAME, + CACHE_DEFAULT_THRESHOLD + "b", + (s) -> { + ByteSizeValue userDefinedLimit = parseBytesSizeValueOrHeapRatio(s, SNAPSHOT_REPOSITORY_DATA_CACHET_THRESHOLD_SETTING_NAME); + long userDefinedLimitBytes = userDefinedLimit.getBytes(); + + if (userDefinedLimitBytes > CACHE_MAX_THRESHOLD) { + throw new IllegalArgumentException( + "[" + + SNAPSHOT_REPOSITORY_DATA_CACHET_THRESHOLD_SETTING_NAME + + "] cannot be larger than [" + + CACHE_MAX_THRESHOLD + + "] bytes." + ); + } + + if (userDefinedLimitBytes < CACHE_MIN_THRESHOLD) { + throw new IllegalArgumentException( + "[" + + SNAPSHOT_REPOSITORY_DATA_CACHET_THRESHOLD_SETTING_NAME + + "] cannot be smaller than [" + + CACHE_MIN_THRESHOLD + + "] bytes." + ); + } + + return userDefinedLimit; + }, + Setting.Property.NodeScope + ); + + public static long calculateDefaultSnapshotRepositoryDataCacheThreshold() { + return Math.max(ByteSizeUnit.KB.toBytes(500), CACHE_MAX_THRESHOLD / 2); + } + + public static long calculateMaxSnapshotRepositoryDataCacheThreshold() { + long jvmHeapSize = JvmInfo.jvmInfo().getMem().getHeapMax().getBytes(); + long defaultThresholdOfHeap = (long) (jvmHeapSize * SNAPSHOT_REPOSITORY_DATA_CACHE_THRESHOLD_DEFAULT_PERCENTAGE); + long defaultAbsoluteThreshold = ByteSizeUnit.KB.toBytes(500); + long maxThreshold = calculateMaxWithinIntLimit(defaultThresholdOfHeap, defaultAbsoluteThreshold); + + return maxThreshold; + } + + protected static long calculateMaxWithinIntLimit(long defaultThresholdOfHeap, long defaultAbsoluteThreshold) { + return Math.min(Math.max(defaultThresholdOfHeap, defaultAbsoluteThreshold), MAX_SAFE_ARRAY_SIZE); + } + /** * Size hint for the IO buffer size to use when reading from and writing to the repository. 
*/ @@ -461,6 +533,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp private volatile boolean enableAsyncDeletion; + protected final long repositoryDataCacheThreshold; + /** * Flag that is set to {@code true} if this instance is started with {@link #metadata} that has a higher value for * {@link RepositoryMetadata#pendingGeneration()} than for {@link RepositoryMetadata#generation()} indicating a full cluster restart @@ -515,6 +589,7 @@ protected BlobStoreRepository( this.snapshotShardPathPrefix = SNAPSHOT_SHARD_PATH_PREFIX_SETTING.get(clusterService.getSettings()); this.enableAsyncDeletion = SNAPSHOT_ASYNC_DELETION_ENABLE_SETTING.get(clusterService.getSettings()); clusterService.getClusterSettings().addSettingsUpdateConsumer(SNAPSHOT_ASYNC_DELETION_ENABLE_SETTING, this::setEnableAsyncDeletion); + this.repositoryDataCacheThreshold = SNAPSHOT_REPOSITORY_DATA_CACHE_THRESHOLD.get(clusterService.getSettings()).getBytes(); } @Override @@ -1132,7 +1207,8 @@ private RepositoryData safeRepositoryData(long repositoryStateId, Map> softRef = latestKnownRepositoryData.get(); + cached = (softRef != null) ? softRef.get() : null; } if (genToLoad > generation) { // It's always a possibility to not see the latest index-N in the listing here on an eventually consistent blob store, just @@ -2926,7 +3002,9 @@ public void endVerification(String seed) { private final AtomicLong latestKnownRepoGen = new AtomicLong(RepositoryData.UNKNOWN_REPO_GEN); // Best effort cache of the latest known repository data and its generation, cached serialized as compressed json - private final AtomicReference> latestKnownRepositoryData = new AtomicReference<>(); + private final AtomicReference>> latestKnownRepositoryData = new AtomicReference<>( + new SoftReference<>(null) + ); @Override public void getRepositoryData(ActionListener listener) { @@ -2934,7 +3012,9 @@ public void getRepositoryData(ActionListener listener) { listener.onFailure(corruptedStateException(null)); return; } - final Tuple cached = latestKnownRepositoryData.get(); + final SoftReference> softRef = latestKnownRepositoryData.get(); + final Tuple cached = (softRef != null) ? softRef.get() : null; + // Fast path loading repository data directly from cache if we're in fully consistent mode and the cache matches up with // the latest known repository generation if (bestEffortConsistency == false && cached != null && cached.v1() == latestKnownRepoGen.get()) { @@ -2983,7 +3063,8 @@ private void doGetRepositoryData(ActionListener listener) { genToLoad = latestKnownRepoGen.get(); } try { - final Tuple cached = latestKnownRepositoryData.get(); + final SoftReference> softRef = latestKnownRepositoryData.get(); + final Tuple cached = (softRef != null) ? 
softRef.get() : null; final RepositoryData loaded; // Caching is not used with #bestEffortConsistency see docs on #cacheRepositoryData for details if (bestEffortConsistency == false && cached != null && cached.v1() == genToLoad) { @@ -3050,19 +3131,22 @@ private void cacheRepositoryData(BytesReference updated, long generation) { try { serialized = CompressorRegistry.defaultCompressor().compress(updated); final int len = serialized.length(); - if (len > ByteSizeUnit.KB.toBytes(500)) { + long cacheWarningThreshold = Math.min(repositoryDataCacheThreshold * 10, MAX_SAFE_ARRAY_SIZE); + if (len > repositoryDataCacheThreshold) { logger.debug( - "Not caching repository data of size [{}] for repository [{}] because it is larger than 500KB in" + "Not caching repository data of size [{}] for repository [{}] because it is larger than [{}] bytes in" + " serialized size", len, - metadata.name() + metadata.name(), + repositoryDataCacheThreshold ); - if (len > ByteSizeUnit.MB.toBytes(5)) { + if (len > cacheWarningThreshold) { logger.warn( - "Your repository metadata blob for repository [{}] is larger than 5MB. Consider moving to a fresh" + "Your repository metadata blob for repository [{}] is larger than [{}] bytes. Consider moving to a fresh" + " repository for new snapshots or deleting unneeded snapshots from your repository to ensure stable" + " repository behavior going forward.", - metadata.name() + metadata.name(), + cacheWarningThreshold ); } // Set empty repository data to not waste heap for an outdated cached value @@ -3074,11 +3158,12 @@ private void cacheRepositoryData(BytesReference updated, long generation) { logger.warn("Failed to serialize repository data", e); return; } - latestKnownRepositoryData.updateAndGet(known -> { + latestKnownRepositoryData.updateAndGet(knownRef -> { + Tuple known = (knownRef != null) ? 
knownRef.get() : null; if (known != null && known.v1() > generation) { - return known; + return knownRef; } - return new Tuple<>(generation, serialized); + return new SoftReference<>(new Tuple<>(generation, serialized)); }); } } diff --git a/server/src/test/java/org/opensearch/common/settings/MemorySizeSettingsTests.java b/server/src/test/java/org/opensearch/common/settings/MemorySizeSettingsTests.java index 95db7c2cfacaa..78782112be844 100644 --- a/server/src/test/java/org/opensearch/common/settings/MemorySizeSettingsTests.java +++ b/server/src/test/java/org/opensearch/common/settings/MemorySizeSettingsTests.java @@ -34,6 +34,7 @@ import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.util.PageCacheRecycler; +import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.indices.IndexingMemoryController; import org.opensearch.indices.IndicesQueryCache; @@ -41,6 +42,7 @@ import org.opensearch.indices.breaker.HierarchyCircuitBreakerService; import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache; import org.opensearch.monitor.jvm.JvmInfo; +import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.test.OpenSearchTestCase; import static org.hamcrest.Matchers.equalTo; @@ -127,22 +129,75 @@ public void testIndicesFieldDataCacheSetting() { ); } + public void testSnapshotRepositoryDataCacheSizeSetting() { + assertMemorySizeSettingInRange( + BlobStoreRepository.SNAPSHOT_REPOSITORY_DATA_CACHE_THRESHOLD, + "snapshot.repository_data.cache.threshold", + new ByteSizeValue(BlobStoreRepository.calculateDefaultSnapshotRepositoryDataCacheThreshold()), + ByteSizeUnit.KB.toBytes(500), + 1.0 + ); + } + private void assertMemorySizeSetting(Setting setting, String settingKey, ByteSizeValue defaultValue) { assertMemorySizeSetting(setting, settingKey, defaultValue, Settings.EMPTY); } private void assertMemorySizeSetting(Setting setting, String settingKey, ByteSizeValue defaultValue, Settings settings) { + assertMemorySizeSetting(setting, settingKey, defaultValue, 25.0, 1024, settings); + } + + private void assertMemorySizeSetting( + Setting setting, + String settingKey, + ByteSizeValue defaultValue, + double availablePercentage, + long availableBytes, + Settings settings + ) { assertThat(setting, notNullValue()); assertThat(setting.getKey(), equalTo(settingKey)); assertThat(setting.getProperties(), hasItem(Property.NodeScope)); assertThat(setting.getDefault(settings), equalTo(defaultValue)); - Settings settingWithPercentage = Settings.builder().put(settingKey, "25%").build(); + Settings settingWithPercentage = Settings.builder().put(settingKey, percentageAsString(availablePercentage)).build(); assertThat( setting.get(settingWithPercentage), - equalTo(new ByteSizeValue((long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.25))) + equalTo( + new ByteSizeValue((long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * percentageAsFraction(availablePercentage))) + ) ); - Settings settingWithBytesValue = Settings.builder().put(settingKey, "1024b").build(); - assertThat(setting.get(settingWithBytesValue), equalTo(new ByteSizeValue(1024))); + Settings settingWithBytesValue = Settings.builder().put(settingKey, availableBytes + "b").build(); + assertThat(setting.get(settingWithBytesValue), equalTo(new ByteSizeValue(availableBytes))); } + private void assertMemorySizeSettingInRange( + Setting setting, + String settingKey, + ByteSizeValue defaultValue, + long minBytes, + double 
maxPercentage + ) { + assertMemorySizeSetting(setting, settingKey, defaultValue, maxPercentage, minBytes, Settings.EMPTY); + + assertThrows(IllegalArgumentException.class, () -> { + Settings settingWithTooSmallValue = Settings.builder().put(settingKey, minBytes - 1).build(); + setting.get(settingWithTooSmallValue); + }); + + assertThrows(IllegalArgumentException.class, () -> { + double unavailablePercentage = maxPercentage + 0.1; + Settings settingWithPercentageExceedingLimit = Settings.builder() + .put(settingKey, percentageAsString(unavailablePercentage)) + .build(); + setting.get(settingWithPercentageExceedingLimit); + }); + } + + private double percentageAsFraction(double availablePercentage) { + return availablePercentage / 100.0; + } + + private String percentageAsString(double availablePercentage) { + return availablePercentage + "%"; + } } diff --git a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java index aa10b7dc18381..620b18ad9d7cf 100644 --- a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java +++ b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java @@ -92,6 +92,7 @@ import java.util.stream.Collectors; import static org.opensearch.repositories.RepositoryDataTests.generateRandomRepoData; +import static org.opensearch.repositories.blobstore.BlobStoreRepository.calculateMaxWithinIntLimit; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; import static org.mockito.ArgumentMatchers.any; @@ -653,4 +654,53 @@ public void testGetRestrictedSystemRepositorySettings() { assertTrue(settings.contains(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY)); repository.close(); } + + public void testSnapshotRepositoryDataCacheDefaultSetting() { + // given + BlobStoreRepository repository = setupRepo(); + long maxThreshold = BlobStoreRepository.calculateMaxSnapshotRepositoryDataCacheThreshold(); + + // when + long expectedThreshold = Math.max(ByteSizeUnit.KB.toBytes(500), maxThreshold / 2); + + // then + assertEquals(repository.repositoryDataCacheThreshold, expectedThreshold); + } + + public void testHeapThresholdUsed() { + // given + long defaultThresholdOfHeap = ByteSizeUnit.GB.toBytes(1); + long defaultAbsoluteThreshold = ByteSizeUnit.KB.toBytes(500); + + // when + long expectedThreshold = calculateMaxWithinIntLimit(defaultThresholdOfHeap, defaultAbsoluteThreshold); + + // then + assertEquals(defaultThresholdOfHeap, expectedThreshold); + } + + public void testAbsoluteThresholdUsed() { + // given + long defaultThresholdOfHeap = ByteSizeUnit.KB.toBytes(499); + long defaultAbsoluteThreshold = ByteSizeUnit.KB.toBytes(500); + + // when + long result = calculateMaxWithinIntLimit(defaultThresholdOfHeap, defaultAbsoluteThreshold); + + // then + assertEquals(defaultAbsoluteThreshold, result); + } + + public void testThresholdCappedAtIntMax() { + // given + int maxSafeArraySize = Integer.MAX_VALUE - 8; + long defaultThresholdOfHeap = (long) maxSafeArraySize + 1; + long defaultAbsoluteThreshold = ByteSizeUnit.KB.toBytes(500); + + // when + long expectedThreshold = calculateMaxWithinIntLimit(defaultThresholdOfHeap, defaultAbsoluteThreshold); + + // then + assertEquals(maxSafeArraySize, expectedThreshold); + } } From 5068fad53922f992ef651f78c148df6d9170c361 Mon Sep 17 00:00:00 2001 From: Prudhvi Godithi Date: Tue, 12 Nov 2024 10:52:41 -0800 Subject: [PATCH 
28/30] Add a new configuration setting `synonym_analyzer` for `synonym_graph` and `synonym`. (#16488) * Add custom synonym_analyzer * synonym_analyzer configuration setting --------- Signed-off-by: Prudhvi Godithi --- CHANGELOG.md | 1 + .../common/CommonAnalysisModulePlugin.java | 29 +++++- .../SynonymGraphTokenFilterFactory.java | 11 ++- .../common/SynonymTokenFilterFactory.java | 28 +++++- .../common/CommonAnalysisFactoryTests.java | 22 +++++ .../common/SynonymsAnalysisTests.java | 95 +++++++++++++++++-- .../indices/analysis/AnalysisModule.java | 7 +- .../opensearch/plugins/AnalysisPlugin.java | 9 ++ .../indices/analysis/AnalysisModuleTests.java | 53 +++++++++++ .../analysis/AnalysisFactoryTestCase.java | 11 +++ 10 files changed, 250 insertions(+), 16 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 515cf0ce93157..e46628249c91e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Support for keyword fields in star-tree index ([#16233](https://github.com/opensearch-project/OpenSearch/pull/16233)) - Add a flag in QueryShardContext to differentiate inner hit query ([#16600](https://github.com/opensearch-project/OpenSearch/pull/16600)) - Add vertical scaling and SoftReference for snapshot repository data cache ([#16489](https://github.com/opensearch-project/OpenSearch/pull/16489)) +- Add new configuration setting `synonym_analyzer`, to the `synonym` and `synonym_graph` filters, enabling the specification of a custom analyzer for reading the synonym file ([#16488](https://github.com/opensearch-project/OpenSearch/pull/16488)). ### Dependencies - Bump `com.azure:azure-storage-common` from 12.25.1 to 12.27.1 ([#16521](https://github.com/opensearch-project/OpenSearch/pull/16521)) diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java index f14e499081ce9..7f9437972a358 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java @@ -146,6 +146,7 @@ import org.opensearch.index.analysis.PreConfiguredTokenizer; import org.opensearch.index.analysis.TokenFilterFactory; import org.opensearch.index.analysis.TokenizerFactory; +import org.opensearch.indices.analysis.AnalysisModule; import org.opensearch.indices.analysis.AnalysisModule.AnalysisProvider; import org.opensearch.indices.analysis.PreBuiltCacheFactory.CachingStrategy; import org.opensearch.plugins.AnalysisPlugin; @@ -247,7 +248,7 @@ public Map>> getAn } @Override - public Map> getTokenFilters() { + public Map> getTokenFilters(AnalysisModule analysisModule) { Map> filters = new TreeMap<>(); filters.put("apostrophe", ApostropheFilterFactory::new); filters.put("arabic_normalization", ArabicNormalizationFilterFactory::new); @@ -332,14 +333,36 @@ public Map> getTokenFilters() { filters.put("sorani_normalization", SoraniNormalizationFilterFactory::new); filters.put("stemmer_override", requiresAnalysisSettings(StemmerOverrideTokenFilterFactory::new)); filters.put("stemmer", StemmerTokenFilterFactory::new); - filters.put("synonym", requiresAnalysisSettings(SynonymTokenFilterFactory::new)); - filters.put("synonym_graph", requiresAnalysisSettings(SynonymGraphTokenFilterFactory::new)); filters.put("trim", TrimTokenFilterFactory::new); 
filters.put("truncate", requiresAnalysisSettings(TruncateTokenFilterFactory::new)); filters.put("unique", UniqueTokenFilterFactory::new); filters.put("uppercase", UpperCaseTokenFilterFactory::new); filters.put("word_delimiter_graph", WordDelimiterGraphTokenFilterFactory::new); filters.put("word_delimiter", WordDelimiterTokenFilterFactory::new); + filters.put( + "synonym", + requiresAnalysisSettings( + (indexSettings, environment, name, settings) -> new SynonymTokenFilterFactory( + indexSettings, + environment, + name, + settings, + analysisModule.getAnalysisRegistry() + ) + ) + ); + filters.put( + "synonym_graph", + requiresAnalysisSettings( + (indexSettings, environment, name, settings) -> new SynonymGraphTokenFilterFactory( + indexSettings, + environment, + name, + settings, + analysisModule.getAnalysisRegistry() + ) + ) + ); return filters; } diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/SynonymGraphTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/SynonymGraphTokenFilterFactory.java index fed959108c411..c2e20e99473de 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/SynonymGraphTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/SynonymGraphTokenFilterFactory.java @@ -40,6 +40,7 @@ import org.opensearch.env.Environment; import org.opensearch.index.IndexSettings; import org.opensearch.index.analysis.AnalysisMode; +import org.opensearch.index.analysis.AnalysisRegistry; import org.opensearch.index.analysis.CharFilterFactory; import org.opensearch.index.analysis.TokenFilterFactory; import org.opensearch.index.analysis.TokenizerFactory; @@ -49,8 +50,14 @@ public class SynonymGraphTokenFilterFactory extends SynonymTokenFilterFactory { - SynonymGraphTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) { - super(indexSettings, env, name, settings); + SynonymGraphTokenFilterFactory( + IndexSettings indexSettings, + Environment env, + String name, + Settings settings, + AnalysisRegistry analysisRegistry + ) { + super(indexSettings, env, name, settings, analysisRegistry); } @Override diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/SynonymTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/SynonymTokenFilterFactory.java index 01a65e87d7466..1cd78170e66c8 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/SynonymTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/SynonymTokenFilterFactory.java @@ -44,11 +44,13 @@ import org.opensearch.index.analysis.AbstractTokenFilterFactory; import org.opensearch.index.analysis.Analysis; import org.opensearch.index.analysis.AnalysisMode; +import org.opensearch.index.analysis.AnalysisRegistry; import org.opensearch.index.analysis.CharFilterFactory; import org.opensearch.index.analysis.CustomAnalyzer; import org.opensearch.index.analysis.TokenFilterFactory; import org.opensearch.index.analysis.TokenizerFactory; +import java.io.IOException; import java.io.Reader; import java.io.StringReader; import java.util.List; @@ -64,8 +66,16 @@ public class SynonymTokenFilterFactory extends AbstractTokenFilterFactory { protected final Settings settings; protected final Environment environment; protected final AnalysisMode analysisMode; - - SynonymTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, 
Settings settings) { + private final String synonymAnalyzerName; + private final AnalysisRegistry analysisRegistry; + + SynonymTokenFilterFactory( + IndexSettings indexSettings, + Environment env, + String name, + Settings settings, + AnalysisRegistry analysisRegistry + ) { super(indexSettings, name, settings); this.settings = settings; @@ -83,6 +93,8 @@ public class SynonymTokenFilterFactory extends AbstractTokenFilterFactory { boolean updateable = settings.getAsBoolean("updateable", false); this.analysisMode = updateable ? AnalysisMode.SEARCH_TIME : AnalysisMode.ALL; this.environment = env; + this.synonymAnalyzerName = settings.get("synonym_analyzer", null); + this.analysisRegistry = analysisRegistry; } @Override @@ -137,6 +149,17 @@ Analyzer buildSynonymAnalyzer( List tokenFilters, Function allFilters ) { + if (synonymAnalyzerName != null) { + Analyzer customSynonymAnalyzer; + try { + customSynonymAnalyzer = analysisRegistry.getAnalyzer(synonymAnalyzerName); + } catch (IOException e) { + throw new RuntimeException(e); + } + if (customSynonymAnalyzer != null) { + return customSynonymAnalyzer; + } + } return new CustomAnalyzer( tokenizer, charFilters.toArray(new CharFilterFactory[0]), @@ -177,5 +200,4 @@ Reader getRulesFromSettings(Environment env) { } return rulesReader; } - } diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CommonAnalysisFactoryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CommonAnalysisFactoryTests.java index 7e3140f8bcba3..1f4faf53dced5 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CommonAnalysisFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CommonAnalysisFactoryTests.java @@ -39,12 +39,16 @@ import org.apache.lucene.analysis.snowball.SnowballPorterFilterFactory; import org.apache.lucene.analysis.te.TeluguNormalizationFilterFactory; import org.apache.lucene.analysis.te.TeluguStemFilterFactory; +import org.opensearch.index.analysis.TokenFilterFactory; import org.opensearch.indices.analysis.AnalysisFactoryTestCase; +import org.opensearch.indices.analysis.AnalysisModule; import java.util.List; import java.util.Map; import java.util.TreeMap; +import org.mockito.Mock; + import static java.util.Collections.emptyList; import static java.util.stream.Collectors.toList; @@ -53,6 +57,9 @@ public CommonAnalysisFactoryTests() { super(new CommonAnalysisModulePlugin()); } + @Mock + private AnalysisModule analysisModule; + @Override protected Map> getTokenizers() { Map> tokenizers = new TreeMap<>(super.getTokenizers()); @@ -302,4 +309,19 @@ private void markedTestCase(String name, Map> map) { unmarked ); } + + /** + * Tests the getTokenFilters(AnalysisModule) method to verify: + * 1. All token filters are properly loaded + * 2. Basic filters remain available + * 3. 
Synonym filters remain available when AnalysisModule is provided + */ + public void testGetTokenFiltersWithAnalysisModule() { + CommonAnalysisModulePlugin plugin = (CommonAnalysisModulePlugin) getAnalysisPlugin(); + Map> filters = plugin.getTokenFilters(analysisModule); + assertNotNull("Token filters should not be null", filters); + assertTrue("Should contain basic filters", filters.containsKey("lowercase")); + assertTrue("Should contain synonym filter", filters.containsKey("synonym")); + assertTrue("Should contain synonym_graph filter", filters.containsKey("synonym_graph")); + } } diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/SynonymsAnalysisTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/SynonymsAnalysisTests.java index 8c8b8ac7f61c0..33d92e01a85b1 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/SynonymsAnalysisTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/SynonymsAnalysisTests.java @@ -41,11 +41,14 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; import org.opensearch.env.Environment; +import org.opensearch.env.TestEnvironment; import org.opensearch.index.IndexSettings; +import org.opensearch.index.analysis.AnalysisRegistry; import org.opensearch.index.analysis.IndexAnalyzers; import org.opensearch.index.analysis.PreConfiguredTokenFilter; import org.opensearch.index.analysis.TokenFilterFactory; import org.opensearch.index.analysis.TokenizerFactory; +import org.opensearch.indices.analysis.AnalysisModule; import org.opensearch.test.IndexSettingsModule; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; @@ -63,6 +66,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.startsWith; +import static org.apache.lucene.tests.analysis.BaseTokenStreamTestCase.assertTokenStreamContents; public class SynonymsAnalysisTests extends OpenSearchTestCase { private IndexAnalyzers indexAnalyzers; @@ -255,14 +259,16 @@ public void testTokenFiltersBypassSynonymAnalysis() throws IOException { .put("hyphenation_patterns_path", "foo") .build(); IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); - + Environment environment = TestEnvironment.newEnvironment(settings); + AnalysisModule analysisModule = new AnalysisModule(environment, Collections.singletonList(new CommonAnalysisModulePlugin())); + AnalysisRegistry analysisRegistry = analysisModule.getAnalysisRegistry(); String[] bypassingFactories = new String[] { "dictionary_decompounder" }; CommonAnalysisModulePlugin plugin = new CommonAnalysisModulePlugin(); for (String factory : bypassingFactories) { - TokenFilterFactory tff = plugin.getTokenFilters().get(factory).get(idxSettings, null, factory, settings); - TokenizerFactory tok = new KeywordTokenizerFactory(idxSettings, null, "keyword", settings); - SynonymTokenFilterFactory stff = new SynonymTokenFilterFactory(idxSettings, null, "synonym", settings); + TokenFilterFactory tff = plugin.getTokenFilters(analysisModule).get(factory).get(idxSettings, environment, factory, settings); + TokenizerFactory tok = new KeywordTokenizerFactory(idxSettings, environment, "keyword", settings); + SynonymTokenFilterFactory stff = new SynonymTokenFilterFactory(idxSettings, environment, "synonym", settings, analysisRegistry); Analyzer analyzer = stff.buildSynonymAnalyzer(tok, 
Collections.emptyList(), Collections.singletonList(tff), null); try (TokenStream ts = analyzer.tokenStream("field", "text")) { @@ -319,7 +325,11 @@ public void testDisallowedTokenFilters() throws IOException { .putList("common_words", "a", "b") .put("output_unigrams", "true") .build(); + + Environment environment = TestEnvironment.newEnvironment(settings); IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); + AnalysisModule analysisModule = new AnalysisModule(environment, Collections.singletonList(new CommonAnalysisModulePlugin())); + AnalysisRegistry analysisRegistry = analysisModule.getAnalysisRegistry(); CommonAnalysisModulePlugin plugin = new CommonAnalysisModulePlugin(); String[] disallowedFactories = new String[] { @@ -333,9 +343,9 @@ public void testDisallowedTokenFilters() throws IOException { "fingerprint" }; for (String factory : disallowedFactories) { - TokenFilterFactory tff = plugin.getTokenFilters().get(factory).get(idxSettings, null, factory, settings); - TokenizerFactory tok = new KeywordTokenizerFactory(idxSettings, null, "keyword", settings); - SynonymTokenFilterFactory stff = new SynonymTokenFilterFactory(idxSettings, null, "synonym", settings); + TokenFilterFactory tff = plugin.getTokenFilters(analysisModule).get(factory).get(idxSettings, environment, factory, settings); + TokenizerFactory tok = new KeywordTokenizerFactory(idxSettings, environment, "keyword", settings); + SynonymTokenFilterFactory stff = new SynonymTokenFilterFactory(idxSettings, environment, "synonym", settings, analysisRegistry); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, @@ -362,4 +372,75 @@ private void match(String analyzerName, String source, String target) throws IOE MatcherAssert.assertThat(target, equalTo(sb.toString().trim())); } + /** + * Tests the integration of word delimiter and synonym graph filters with synonym_analyzer based on issue #16263. + * This test verifies the correct handling of: + * 1. Hyphenated words with word delimiter (e.g., "note-book" → ["notebook", "note", "book"]) + * 2. Multi-word synonyms (e.g., "mobile phone" → ["smartphone"]) + * 3. 
Single word synonyms (e.g., "laptop" → ["notebook"]) + * + * @see Issue #16263 + */ + public void testSynonymAnalyzerWithWordDelimiter() throws IOException { + Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put("path.home", createTempDir().toString()) + .put("index.analysis.filter.custom_word_delimiter.type", "word_delimiter_graph") + .put("index.analysis.filter.custom_word_delimiter.generate_word_parts", true) + .put("index.analysis.filter.custom_word_delimiter.catenate_all", true) + .put("index.analysis.filter.custom_word_delimiter.split_on_numerics", false) + .put("index.analysis.filter.custom_word_delimiter.split_on_case_change", false) + .put("index.analysis.filter.custom_pattern_replace_filter.type", "pattern_replace") + .put("index.analysis.filter.custom_pattern_replace_filter.pattern", "(-)") + .put("index.analysis.filter.custom_pattern_replace_filter.replacement", " ") + .put("index.analysis.filter.custom_pattern_replace_filter.all", true) + .put("index.analysis.filter.custom_synonym_graph_filter.type", "synonym_graph") + .putList( + "index.analysis.filter.custom_synonym_graph_filter.synonyms", + "laptop => notebook", + "smartphone, mobile phone, cell phone => smartphone", + "tv, television => television" + ) + .put("index.analysis.filter.custom_synonym_graph_filter.synonym_analyzer", "standard") + .put("index.analysis.analyzer.text_en_index.type", "custom") + .put("index.analysis.analyzer.text_en_index.tokenizer", "whitespace") + .putList( + "index.analysis.analyzer.text_en_index.filter", + "lowercase", + "custom_word_delimiter", + "custom_synonym_graph_filter", + "custom_pattern_replace_filter", + "flatten_graph" + ) + .build(); + Environment environment = TestEnvironment.newEnvironment(settings); + IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test", settings); + AnalysisModule module = new AnalysisModule(environment, Collections.singletonList(new CommonAnalysisModulePlugin())); + IndexAnalyzers analyzers = module.getAnalysisRegistry().build(indexSettings); + try (TokenStream ts = analyzers.get("text_en_index").tokenStream("", "note-book")) { + assertTokenStreamContents( + ts, + new String[] { "notebook", "note", "book" }, + new int[] { 0, 0, 5 }, + new int[] { 9, 4, 9 }, + new String[] { "word", "word", "word" }, + new int[] { 1, 0, 1 }, + new int[] { 2, 1, 1 } + ); + } + try (TokenStream ts = analyzers.get("text_en_index").tokenStream("", "mobile phone")) { + assertTokenStreamContents( + ts, + new String[] { "smartphone" }, + new int[] { 0 }, + new int[] { 12 }, + new String[] { "SYNONYM" }, + new int[] { 1 }, + new int[] { 1 } + ); + } + try (TokenStream ts = analyzers.get("text_en_index").tokenStream("", "laptop")) { + assertTokenStreamContents(ts, new String[] { "notebook" }, new int[] { 0 }, new int[] { 6 }); + } + } } diff --git a/server/src/main/java/org/opensearch/indices/analysis/AnalysisModule.java b/server/src/main/java/org/opensearch/indices/analysis/AnalysisModule.java index 0926d497087d1..dbb3035a18f74 100644 --- a/server/src/main/java/org/opensearch/indices/analysis/AnalysisModule.java +++ b/server/src/main/java/org/opensearch/indices/analysis/AnalysisModule.java @@ -165,7 +165,12 @@ public boolean requiresAnalysisSettings() { ) ); - tokenFilters.extractAndRegister(plugins, AnalysisPlugin::getTokenFilters); + for (AnalysisPlugin plugin : plugins) { + Map> filters = plugin.getTokenFilters(this); + for (Map.Entry> entry : filters.entrySet()) { + tokenFilters.register(entry.getKey(), 
entry.getValue()); + } + } return tokenFilters; } diff --git a/server/src/main/java/org/opensearch/plugins/AnalysisPlugin.java b/server/src/main/java/org/opensearch/plugins/AnalysisPlugin.java index 53dcc916b244f..58e43633777c9 100644 --- a/server/src/main/java/org/opensearch/plugins/AnalysisPlugin.java +++ b/server/src/main/java/org/opensearch/plugins/AnalysisPlugin.java @@ -47,6 +47,7 @@ import org.opensearch.index.analysis.PreConfiguredTokenizer; import org.opensearch.index.analysis.TokenFilterFactory; import org.opensearch.index.analysis.TokenizerFactory; +import org.opensearch.indices.analysis.AnalysisModule; import org.opensearch.indices.analysis.AnalysisModule.AnalysisProvider; import java.io.IOException; @@ -84,6 +85,14 @@ default Map> getCharFilters() { return emptyMap(); } + /** + * Override to add additional {@link TokenFilter}s that need access to the AnalysisModule. + * The default implementation for plugins that don't need AnalysisModule calls the existing getTokenFilters() method. + */ + default Map> getTokenFilters(AnalysisModule analysisModule) { + return getTokenFilters(); + } + /** * Override to add additional {@link TokenFilter}s. See {@link #requiresAnalysisSettings(AnalysisProvider)} * how to on get the configuration from the index. diff --git a/server/src/test/java/org/opensearch/indices/analysis/AnalysisModuleTests.java b/server/src/test/java/org/opensearch/indices/analysis/AnalysisModuleTests.java index c9e26d6d6159a..74bc987c44b15 100644 --- a/server/src/test/java/org/opensearch/indices/analysis/AnalysisModuleTests.java +++ b/server/src/test/java/org/opensearch/indices/analysis/AnalysisModuleTests.java @@ -56,6 +56,8 @@ import org.opensearch.index.analysis.CustomAnalyzer; import org.opensearch.index.analysis.IndexAnalyzers; import org.opensearch.index.analysis.MyFilterTokenFilterFactory; +import org.opensearch.index.analysis.NameOrDefinition; +import org.opensearch.index.analysis.NamedAnalyzer; import org.opensearch.index.analysis.PreConfiguredCharFilter; import org.opensearch.index.analysis.PreConfiguredTokenFilter; import org.opensearch.index.analysis.PreConfiguredTokenizer; @@ -80,6 +82,7 @@ import java.nio.file.Files; import java.nio.file.Path; import java.util.Arrays; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Set; @@ -521,4 +524,54 @@ public boolean incrementToken() throws IOException { } } + /** + * Tests registration and functionality of token filters that require access to the AnalysisModule. 
+ * This test verifies the token filter registration using the extended getTokenFilters(AnalysisModule) method + */ + public void testTokenFilterRegistrationWithModuleReference() throws IOException { + class TestPlugin implements AnalysisPlugin { + @Override + public Map> getTokenFilters(AnalysisModule module) { + return Map.of( + "test_filter", + (indexSettings, env, name, settings) -> AppendTokenFilter.factoryForSuffix("_" + module.hashCode()) + ); + } + } + Settings settings = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard") + .put("index.analysis.analyzer.my_analyzer.filter", "test_filter") + .build(); + Environment environment = TestEnvironment.newEnvironment(settings); + AnalysisModule module = new AnalysisModule(environment, singletonList(new TestPlugin())); + AnalysisRegistry registry = module.getAnalysisRegistry(); + IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test", Settings.builder().put(settings).build()); + Map tokenFilterFactories = registry.buildTokenFilterFactories(indexSettings); + assertTrue("Token filter 'test_filter' should be registered", tokenFilterFactories.containsKey("test_filter")); + IndexAnalyzers analyzers = registry.build(indexSettings); + String testText = "test"; + TokenStream tokenStream = analyzers.get("my_analyzer").tokenStream("", testText); + CharTermAttribute charTermAttribute = tokenStream.addAttribute(CharTermAttribute.class); + tokenStream.reset(); + assertTrue("Should have found a token", tokenStream.incrementToken()); + assertEquals("Token should have expected suffix", "test_" + module.hashCode(), charTermAttribute.toString()); + assertFalse("Should not have additional tokens", tokenStream.incrementToken()); + tokenStream.close(); + NamedAnalyzer customAnalyzer = registry.buildCustomAnalyzer( + indexSettings, + false, + new NameOrDefinition("standard"), + Collections.emptyList(), + Collections.singletonList(new NameOrDefinition("test_filter")) + ); + tokenStream = customAnalyzer.tokenStream("", testText); + charTermAttribute = tokenStream.addAttribute(CharTermAttribute.class); + tokenStream.reset(); + assertTrue("Custom analyzer should produce a token", tokenStream.incrementToken()); + assertEquals("Custom analyzer token should have expected suffix", "test_" + module.hashCode(), charTermAttribute.toString()); + assertFalse("Custom analyzer should not produce additional tokens", tokenStream.incrementToken()); + tokenStream.close(); + } } diff --git a/test/framework/src/main/java/org/opensearch/indices/analysis/AnalysisFactoryTestCase.java b/test/framework/src/main/java/org/opensearch/indices/analysis/AnalysisFactoryTestCase.java index 23cf4d47a49d9..ca23f67215f3d 100644 --- a/test/framework/src/main/java/org/opensearch/indices/analysis/AnalysisFactoryTestCase.java +++ b/test/framework/src/main/java/org/opensearch/indices/analysis/AnalysisFactoryTestCase.java @@ -248,6 +248,17 @@ public AnalysisFactoryTestCase(AnalysisPlugin plugin) { this.plugin = Objects.requireNonNull(plugin, "plugin is required. use an empty plugin for core"); } + /** + * Returns the AnalysisPlugin instance that was passed to this test case. + * This protected method allows subclasses to access the plugin for testing + * specific analysis components. 
+ * + * @return The AnalysisPlugin instance used by this test case + */ + protected AnalysisPlugin getAnalysisPlugin() { + return plugin; + } + protected Map> getCharFilters() { return KNOWN_CHARFILTERS; } From 548a6505d6eaa88170462ef5eaf45a89a367126e Mon Sep 17 00:00:00 2001 From: rajiv-kv <157019998+rajiv-kv@users.noreply.github.com> Date: Wed, 13 Nov 2024 11:20:23 +0530 Subject: [PATCH 29/30] Support prefix list for remote repository attributes (#16271) * Support prefix list for remote repository attributes Signed-off-by: Rajiv Kumar Vaidyanathan --- CHANGELOG.md | 1 + .../RemotePublicationConfigurationIT.java | 274 +--------------- .../RemoteRepositoryConfigurationIT.java | 308 ++++++++++++++++++ .../coordination/JoinTaskExecutor.java | 19 +- .../metadata/MetadataCreateIndexService.java | 8 +- .../cluster/node/DiscoveryNode.java | 32 +- .../InternalRemoteRoutingTableService.java | 5 +- .../remote/RemoteClusterStateService.java | 6 +- .../index/remote/RemoteIndexPathUploader.java | 20 +- .../RemoteMigrationIndexMetadataUpdater.java | 8 +- .../remotestore/RemoteStoreNodeAttribute.java | 248 +++++++++++--- .../RemoteStorePinnedTimestampService.java | 7 +- .../cluster/node/DiscoveryNodeTests.java | 2 +- .../remote/RemoteIndexPathUploaderTests.java | 14 +- .../node/RemoteStoreNodeAttributeTests.java | 54 +++ .../test/OpenSearchIntegTestCase.java | 2 +- .../test/RemoteStoreAttributeConstants.java | 19 ++ 17 files changed, 635 insertions(+), 392 deletions(-) create mode 100644 server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteRepositoryConfigurationIT.java create mode 100644 test/framework/src/main/java/org/opensearch/test/RemoteStoreAttributeConstants.java diff --git a/CHANGELOG.md b/CHANGELOG.md index e46628249c91e..c19019ece6c5c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Support for keyword fields in star-tree index ([#16233](https://github.com/opensearch-project/OpenSearch/pull/16233)) - Add a flag in QueryShardContext to differentiate inner hit query ([#16600](https://github.com/opensearch-project/OpenSearch/pull/16600)) - Add vertical scaling and SoftReference for snapshot repository data cache ([#16489](https://github.com/opensearch-project/OpenSearch/pull/16489)) +- Support prefix list for remote repository attributes([#16271](https://github.com/opensearch-project/OpenSearch/pull/16271)) - Add new configuration setting `synonym_analyzer`, to the `synonym` and `synonym_graph` filters, enabling the specification of a custom analyzer for reading the synonym file ([#16488](https://github.com/opensearch-project/OpenSearch/pull/16488)). 
### Dependencies diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemotePublicationConfigurationIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemotePublicationConfigurationIT.java index 57bf9eccbf5b4..1b5d924fa0b62 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemotePublicationConfigurationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemotePublicationConfigurationIT.java @@ -8,34 +8,9 @@ package org.opensearch.gateway.remote; -import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; -import org.opensearch.common.settings.Settings; -import org.opensearch.plugins.Plugin; -import org.opensearch.remotemigration.MigrationBaseTestCase; -import org.opensearch.remotestore.multipart.mocks.MockFsRepositoryPlugin; -import org.opensearch.repositories.blobstore.BlobStoreRepository; -import org.opensearch.repositories.fs.FsRepository; -import org.opensearch.repositories.fs.ReloadableFsRepository; -import org.opensearch.test.InternalSettingsPlugin; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.transport.MockTransportService; -import org.junit.Assert; import org.junit.Before; -import java.util.Collection; -import java.util.Locale; -import java.util.stream.Collectors; -import java.util.stream.Stream; - -import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING; -import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; -import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; -import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; -import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY; -import static org.opensearch.node.remotestore.RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING; -import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; - /** * Tests the compatibility between types of nodes based on the configured repositories * Non Remote node [No Repositories configured] @@ -44,260 +19,15 @@ * Remote Node With Routing Table [Cluster State + Segment + Translog + Routing Table] */ @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) -public class RemotePublicationConfigurationIT extends MigrationBaseTestCase { - private final String REMOTE_PRI_DOCREP_REP = "remote-primary-docrep-replica"; - - @Override - protected Collection> nodePlugins() { - /* Adding the following mock plugins: - - InternalSettingsPlugin : To override default intervals of retention lease and global ckp sync - - MockFsRepositoryPlugin and MockTransportService.TestPlugin: To ensure remote interactions are not no-op and retention leases are properly propagated - */ - return Stream.concat( - super.nodePlugins().stream(), - Stream.of(InternalSettingsPlugin.class, MockFsRepositoryPlugin.class, MockTransportService.TestPlugin.class) - ).collect(Collectors.toList()); - } - +public class RemotePublicationConfigurationIT extends RemoteRepositoryConfigurationIT { @Before public void setUp() throws Exception { if (segmentRepoPath == null || translogRepoPath == 
null) { segmentRepoPath = randomRepoPath().toAbsolutePath(); translogRepoPath = randomRepoPath().toAbsolutePath(); } + super.remoteRepoPrefix = "remote_publication"; super.setUp(); } - public Settings.Builder remotePublishConfiguredNodeSetting() { - String stateRepoSettingsAttributeKeyPrefix = String.format( - Locale.getDefault(), - "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, - REPOSITORY_NAME - ); - String prefixModeVerificationSuffix = BlobStoreRepository.PREFIX_MODE_VERIFICATION_SETTING.getKey(); - String stateRepoTypeAttributeKey = String.format( - Locale.getDefault(), - "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, - REPOSITORY_NAME - ); - String routingTableRepoTypeAttributeKey = String.format( - Locale.getDefault(), - "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, - ROUTING_TABLE_REPO_NAME - ); - String routingTableRepoSettingsAttributeKeyPrefix = String.format( - Locale.getDefault(), - "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, - ROUTING_TABLE_REPO_NAME - ); - - Settings.Builder builder = Settings.builder() - .put("node.attr." + REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, REPOSITORY_NAME) - .put(stateRepoTypeAttributeKey, ReloadableFsRepository.TYPE) - .put(stateRepoSettingsAttributeKeyPrefix + "location", segmentRepoPath) - .put(stateRepoSettingsAttributeKeyPrefix + prefixModeVerificationSuffix, true) - .put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true) - .put("node.attr." + REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY, ROUTING_TABLE_REPO_NAME) - .put(routingTableRepoTypeAttributeKey, FsRepository.TYPE) - .put(routingTableRepoSettingsAttributeKeyPrefix + "location", segmentRepoPath); - return builder; - } - - public Settings.Builder remoteWithRoutingTableNodeSetting() { - // Remote Cluster with Routing table - - return Settings.builder() - .put( - remoteStoreClusterSettings( - REPOSITORY_NAME, - segmentRepoPath, - ReloadableFsRepository.TYPE, - REPOSITORY_2_NAME, - translogRepoPath, - ReloadableFsRepository.TYPE, - REPOSITORY_NAME, - segmentRepoPath, - ReloadableFsRepository.TYPE - ) - ) - .put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true); - } - - public void testRemoteClusterStateServiceNotInitialized_WhenNodeAttributesNotPresent() { - internalCluster().startClusterManagerOnlyNode(); - internalCluster().startDataOnlyNodes(2); - - ensureStableCluster(3); - ensureGreen(); - - internalCluster().getDataOrClusterManagerNodeInstances(RemoteClusterStateService.class).forEach(Assert::assertNull); - } - - public void testServiceInitialized_WhenNodeAttributesPresent() { - internalCluster().startClusterManagerOnlyNode( - buildRemoteStateNodeAttributes(REPOSITORY_NAME, segmentRepoPath, ReloadableFsRepository.TYPE) - ); - internalCluster().startDataOnlyNodes( - 2, - buildRemoteStateNodeAttributes(REPOSITORY_NAME, segmentRepoPath, ReloadableFsRepository.TYPE) - ); - - ensureStableCluster(3); - ensureGreen(); - - internalCluster().getDataOrClusterManagerNodeInstances(RemoteClusterStateService.class).forEach(Assert::assertNotNull); - } - - public void testRemotePublishConfigNodeJoinNonRemoteCluster() throws Exception { - internalCluster().startClusterManagerOnlyNode(); - internalCluster().startDataOnlyNodes(2); - - Settings.Builder build = remotePublishConfiguredNodeSetting(); - internalCluster().startClusterManagerOnlyNode(build.build()); - internalCluster().startDataOnlyNodes(2, build.build()); - - ensureStableCluster(6); - ensureGreen(); - } - - public void 
testRemotePublishConfigNodeJoinRemoteCluster() throws Exception { - // Remote Cluster without Routing table - setAddRemote(true); - internalCluster().startClusterManagerOnlyNode(); - internalCluster().startDataOnlyNodes(2); - setAddRemote(false); - - ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); - updateSettingsRequest.persistentSettings( - Settings.builder() - .put(MIGRATION_DIRECTION_SETTING.getKey(), "remote_store") - .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed") - ); - assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); - Settings.Builder build = remotePublishConfiguredNodeSetting(); - internalCluster().startClusterManagerOnlyNode(build.build()); - ensureStableCluster(4); - ensureGreen(); - } - - public void testRemoteNodeWithRoutingTableJoinRemoteCluster() throws Exception { - setAddRemote(true); - internalCluster().startClusterManagerOnlyNode(); - internalCluster().startDataOnlyNodes(2); - setAddRemote(false); - - ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); - updateSettingsRequest.persistentSettings( - Settings.builder() - .put(MIGRATION_DIRECTION_SETTING.getKey(), "remote_store") - .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed") - ); - assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); - - // Remote Repo with Routing table - Settings settings = remoteWithRoutingTableNodeSetting().build(); - internalCluster().startClusterManagerOnlyNode(settings); - ensureStableCluster(4); - ensureGreen(); - } - - public void testNonRemoteNodeJoinRemoteWithRoutingCluster() throws Exception { - Settings settings = remoteWithRoutingTableNodeSetting().build(); - internalCluster().startClusterManagerOnlyNode(settings); - internalCluster().startDataOnlyNodes(2, settings); - - ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); - updateSettingsRequest.persistentSettings( - Settings.builder() - .put(MIGRATION_DIRECTION_SETTING.getKey(), "remote_store") - .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed") - ); - assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); - - internalCluster().startClusterManagerOnlyNode(); - ensureStableCluster(4); - ensureGreen(); - } - - public void testRemotePublishConfigNodeJoinRemoteWithRoutingCluster() throws Exception { - Settings settings = remoteWithRoutingTableNodeSetting().build(); - internalCluster().startClusterManagerOnlyNode(settings); - internalCluster().startDataOnlyNodes(2, settings); - - ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); - updateSettingsRequest.persistentSettings( - Settings.builder() - .put(MIGRATION_DIRECTION_SETTING.getKey(), "remote_store") - .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed") - ); - assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); - - internalCluster().startClusterManagerOnlyNode(remotePublishConfiguredNodeSetting().build()); - - ensureStableCluster(4); - ensureGreen(); - } - - public void testNonRemoteNodeJoiningPublishConfigCluster() throws Exception { - Settings.Builder build = remotePublishConfiguredNodeSetting(); - internalCluster().startClusterManagerOnlyNode(build.build()); - internalCluster().startDataOnlyNodes(2, build.build()); - - internalCluster().startClusterManagerOnlyNode(); - - ensureStableCluster(4); - ensureGreen(); - } - - public 
void testRemoteNodeJoiningPublishConfigCluster() throws Exception { - Settings.Builder build = remotePublishConfiguredNodeSetting(); - internalCluster().startClusterManagerOnlyNode(build.build()); - internalCluster().startDataOnlyNodes(2, build.build()); - - ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); - updateSettingsRequest.persistentSettings( - Settings.builder() - .put(MIGRATION_DIRECTION_SETTING.getKey(), "remote_store") - .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed") - ); - assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); - - setAddRemote(true); - internalCluster().startClusterManagerOnlyNode(); - ensureStableCluster(4); - ensureGreen(); - } - - public void testRemoteNodeWithRoutingTableJoiningPublishConfigCluster() throws Exception { - Settings.Builder build = remotePublishConfiguredNodeSetting(); - internalCluster().startClusterManagerOnlyNode(build.build()); - internalCluster().startDataOnlyNodes(2, build.build()); - - ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); - updateSettingsRequest.persistentSettings( - Settings.builder() - .put(MIGRATION_DIRECTION_SETTING.getKey(), "remote_store") - .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed") - ); - - Settings settings = Settings.builder() - .put( - buildRemoteStoreNodeAttributes( - REPOSITORY_NAME, - segmentRepoPath, - REPOSITORY_2_NAME, - translogRepoPath, - ROUTING_TABLE_REPO_NAME, - segmentRepoPath, - false - ) - ) - .put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true) - .build(); - assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); - internalCluster().startClusterManagerOnlyNode(settings); - - ensureStableCluster(4); - ensureGreen(); - } } diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteRepositoryConfigurationIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteRepositoryConfigurationIT.java new file mode 100644 index 0000000000000..48afa85dc5691 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteRepositoryConfigurationIT.java @@ -0,0 +1,308 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.gateway.remote; + +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.opensearch.common.settings.Settings; +import org.opensearch.plugins.Plugin; +import org.opensearch.remotemigration.MigrationBaseTestCase; +import org.opensearch.remotestore.multipart.mocks.MockFsRepositoryPlugin; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.repositories.fs.FsRepository; +import org.opensearch.repositories.fs.ReloadableFsRepository; +import org.opensearch.test.InternalSettingsPlugin; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.transport.MockTransportService; +import org.junit.Assert; +import org.junit.Before; + +import java.util.Collection; +import java.util.Locale; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +/** + * Tests the compatibility between types of nodes based on the configured repositories + * Non Remote node [No Repositories configured] + * Remote Publish Configured Node [Cluster State + Routing Table] + * Remote Node [Cluster State + Segment + Translog] + * Remote Node With Routing Table [Cluster State + Segment + Translog + Routing Table] + */ +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class RemoteRepositoryConfigurationIT extends MigrationBaseTestCase { + private final String REMOTE_PRI_DOCREP_REP = "remote-primary-docrep-replica"; + + protected String remoteRepoPrefix = "remote_store"; + + @Override + protected Collection> nodePlugins() { + /* Adding the following mock plugins: + - InternalSettingsPlugin : To override default intervals of retention lease and global ckp sync + - MockFsRepositoryPlugin and MockTransportService.TestPlugin: To ensure remote interactions are not no-op and retention leases are properly propagated + */ + return Stream.concat( + super.nodePlugins().stream(), + Stream.of(InternalSettingsPlugin.class, MockFsRepositoryPlugin.class, MockTransportService.TestPlugin.class) + ).collect(Collectors.toList()); + } + + @Before + public void setUp() throws Exception { + if (segmentRepoPath == null || translogRepoPath == null) { + segmentRepoPath = randomRepoPath().toAbsolutePath(); + translogRepoPath = randomRepoPath().toAbsolutePath(); + } + super.setUp(); + } + + public Settings.Builder remotePublishConfiguredNodeSetting() { + String stateRepoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." + REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + remoteRepoPrefix, + REPOSITORY_NAME + ); + String prefixModeVerificationSuffix = BlobStoreRepository.PREFIX_MODE_VERIFICATION_SETTING.getKey(); + String stateRepoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." 
+ REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + remoteRepoPrefix, + REPOSITORY_NAME + ); + String routingTableRepoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + remoteRepoPrefix, + ROUTING_TABLE_REPO_NAME + ); + String routingTableRepoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." + REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + remoteRepoPrefix, + ROUTING_TABLE_REPO_NAME + ); + + Settings.Builder builder = Settings.builder() + .put("node.attr." + remoteRepoPrefix + ".state.repository", REPOSITORY_NAME) + .put(stateRepoTypeAttributeKey, ReloadableFsRepository.TYPE) + .put(stateRepoSettingsAttributeKeyPrefix + "location", segmentRepoPath) + .put(stateRepoSettingsAttributeKeyPrefix + prefixModeVerificationSuffix, true) + .put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true) + .put("node.attr." + remoteRepoPrefix + ".routing_table.repository", ROUTING_TABLE_REPO_NAME) + .put(routingTableRepoTypeAttributeKey, ReloadableFsRepository.TYPE) + .put(routingTableRepoSettingsAttributeKeyPrefix + "location", segmentRepoPath); + return builder; + } + + public Settings.Builder remoteWithRoutingTableNodeSetting() { + // Remote Cluster with Routing table + return Settings.builder() + .put( + buildRemoteStoreNodeAttributes( + REPOSITORY_NAME, + segmentRepoPath, + REPOSITORY_2_NAME, + translogRepoPath, + ROUTING_TABLE_REPO_NAME, + segmentRepoPath, + false + ) + ) + .put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true); + } + + public void testRemoteClusterStateServiceNotInitialized_WhenNodeAttributesNotPresent() { + internalCluster().startClusterManagerOnlyNode(); + internalCluster().startDataOnlyNodes(2); + + ensureStableCluster(3); + ensureGreen(); + + internalCluster().getDataOrClusterManagerNodeInstances(RemoteClusterStateService.class).forEach(Assert::assertNull); + } + + public void testServiceInitialized_WhenNodeAttributesPresent() { + internalCluster().startClusterManagerOnlyNode( + buildRemoteStateNodeAttributes(REPOSITORY_NAME, segmentRepoPath, ReloadableFsRepository.TYPE) + ); + internalCluster().startDataOnlyNodes( + 2, + buildRemoteStateNodeAttributes(REPOSITORY_NAME, segmentRepoPath, ReloadableFsRepository.TYPE) + ); + + ensureStableCluster(3); + ensureGreen(); + + internalCluster().getDataOrClusterManagerNodeInstances(RemoteClusterStateService.class).forEach(Assert::assertNotNull); + } + + public void testRemotePublishConfigNodeJoinNonRemoteCluster() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + internalCluster().startDataOnlyNodes(2); + + Settings.Builder build = remotePublishConfiguredNodeSetting(); + internalCluster().startClusterManagerOnlyNode(build.build()); + internalCluster().startDataOnlyNodes(2, build.build()); + + ensureStableCluster(6); + ensureGreen(); + } + + public void testRemotePublishConfigNodeJoinRemoteCluster() throws Exception { + // Remote Cluster without Routing table + setAddRemote(true); + internalCluster().startClusterManagerOnlyNode(); + internalCluster().startDataOnlyNodes(2); + setAddRemote(false); + + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.persistentSettings( + Settings.builder() + .put(MIGRATION_DIRECTION_SETTING.getKey(), "remote_store") + .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed") + ); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + Settings.Builder build = remotePublishConfiguredNodeSetting(); + 
internalCluster().startClusterManagerOnlyNode(build.build()); + ensureStableCluster(4); + ensureGreen(); + } + + public void testRemoteNodeWithRoutingTableJoinRemoteCluster() throws Exception { + setAddRemote(true); + internalCluster().startClusterManagerOnlyNode(); + internalCluster().startDataOnlyNodes(2); + setAddRemote(false); + + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.persistentSettings( + Settings.builder() + .put(MIGRATION_DIRECTION_SETTING.getKey(), "remote_store") + .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed") + ); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + // Remote Repo with Routing table + Settings settings = remoteWithRoutingTableNodeSetting().build(); + + internalCluster().startClusterManagerOnlyNode(settings); + ensureStableCluster(4); + ensureGreen(); + } + + public void testNonRemoteNodeJoinRemoteWithRoutingCluster() throws Exception { + Settings settings = remoteWithRoutingTableNodeSetting().build(); + internalCluster().startClusterManagerOnlyNode(settings); + internalCluster().startDataOnlyNodes(2, settings); + + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.persistentSettings( + Settings.builder() + .put(MIGRATION_DIRECTION_SETTING.getKey(), "remote_store") + .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed") + ); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + internalCluster().startClusterManagerOnlyNode(); + ensureStableCluster(4); + ensureGreen(); + } + + public void testRemotePublishConfigNodeJoinRemoteWithRoutingCluster() throws Exception { + Settings settings = remoteWithRoutingTableNodeSetting().build(); + internalCluster().startClusterManagerOnlyNode(settings); + internalCluster().startDataOnlyNodes(2, settings); + + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.persistentSettings( + Settings.builder() + .put(MIGRATION_DIRECTION_SETTING.getKey(), "remote_store") + .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed") + ); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + internalCluster().startClusterManagerOnlyNode(remotePublishConfiguredNodeSetting().build()); + + ensureStableCluster(4); + ensureGreen(); + } + + public void testNonRemoteNodeJoiningPublishConfigCluster() throws Exception { + Settings.Builder build = remotePublishConfiguredNodeSetting(); + internalCluster().startClusterManagerOnlyNode(build.build()); + internalCluster().startDataOnlyNodes(2, build.build()); + + internalCluster().startClusterManagerOnlyNode(); + + ensureStableCluster(4); + ensureGreen(); + } + + public void testRemoteNodeJoiningPublishConfigCluster() throws Exception { + Settings.Builder build = remotePublishConfiguredNodeSetting(); + internalCluster().startClusterManagerOnlyNode(build.build()); + internalCluster().startDataOnlyNodes(2, build.build()); + + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.persistentSettings( + Settings.builder() + .put(MIGRATION_DIRECTION_SETTING.getKey(), "remote_store") + .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed") + ); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + setAddRemote(true); + 
internalCluster().startClusterManagerOnlyNode(); + ensureStableCluster(4); + ensureGreen(); + } + + public void testRemoteNodeWithRoutingTableJoiningPublishConfigCluster() throws Exception { + Settings.Builder build = remotePublishConfiguredNodeSetting(); + internalCluster().startClusterManagerOnlyNode(build.build()); + internalCluster().startDataOnlyNodes(2, build.build()); + + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.persistentSettings( + Settings.builder() + .put(MIGRATION_DIRECTION_SETTING.getKey(), "remote_store") + .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed") + ); + + Settings settings = Settings.builder() + .put( + buildRemoteStoreNodeAttributes( + REPOSITORY_NAME, + segmentRepoPath, + ReloadableFsRepository.TYPE, + REPOSITORY_2_NAME, + translogRepoPath, + FsRepository.TYPE, + ROUTING_TABLE_REPO_NAME, + segmentRepoPath, + ReloadableFsRepository.TYPE, + false + ) + ) + .put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true) + .build(); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + internalCluster().startClusterManagerOnlyNode(settings); + + ensureStableCluster(4); + ensureGreen(); + } +} diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java index d597b51c32ccd..ec30496a3f7ad 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java @@ -55,6 +55,7 @@ import org.opensearch.persistent.PersistentTasksCustomMetadata; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -69,7 +70,8 @@ import static org.opensearch.cluster.decommission.DecommissionHelper.nodeCommissioned; import static org.opensearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; -import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_CLUSTER_PUBLICATION_REPO_NAME_ATTRIBUTES; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.getClusterStateRepoName; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.getRoutingTableRepoName; import static org.opensearch.node.remotestore.RemoteStoreNodeService.CompatibilityMode; import static org.opensearch.node.remotestore.RemoteStoreNodeService.CompatibilityMode.MIXED; import static org.opensearch.node.remotestore.RemoteStoreNodeService.CompatibilityMode.STRICT; @@ -539,7 +541,12 @@ private static void ensureRemoteClusterStateNodesCompatibility(DiscoveryNode joi .findFirst(); if (remotePublicationNode.isPresent() && joiningNode.isRemoteStatePublicationEnabled()) { - ensureRepositoryCompatibility(joiningNode, remotePublicationNode.get(), REMOTE_CLUSTER_PUBLICATION_REPO_NAME_ATTRIBUTES); + List repos = Arrays.asList( + getClusterStateRepoName(remotePublicationNode.get().getAttributes()), + getRoutingTableRepoName(remotePublicationNode.get().getAttributes()) + ); + + ensureRepositoryCompatibility(joiningNode, remotePublicationNode.get(), repos); } } @@ -568,16 +575,12 @@ private static void ensureRemoteStoreNodesCompatibility(DiscoveryNode joiningNod List reposToSkip = new ArrayList<>(1); // find a remote node which has routing table configured Optional remoteRoutingTableNode = existingNodes.stream() - .filter( - node -> node.isRemoteStoreNode() - && 
node.getAttributes().get(RemoteStoreNodeAttribute.REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY) != null - ) + .filter(node -> node.isRemoteStoreNode() && RemoteStoreNodeAttribute.getRoutingTableRepoName(node.getAttributes()) != null) .findFirst(); // If none of the existing nodes have routing table repo, then we skip this repo check if present in joining node. // This ensures a new node with remote routing table repo is able to join the cluster. if (remoteRoutingTableNode.isEmpty()) { - String joiningNodeRepoName = joiningNode.getAttributes() - .get(RemoteStoreNodeAttribute.REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY); + String joiningNodeRepoName = getRoutingTableRepoName(joiningNode.getAttributes()); if (joiningNodeRepoName != null) { reposToSkip.add(joiningNodeRepoName); } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index 727a08b615050..232201d18ba13 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -1178,12 +1178,8 @@ public static void updateRemoteStoreSettings( .findFirst(); if (remoteNode.isPresent()) { - translogRepo = remoteNode.get() - .getAttributes() - .get(RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY); - segmentRepo = remoteNode.get() - .getAttributes() - .get(RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY); + translogRepo = RemoteStoreNodeAttribute.getTranslogRepoName(remoteNode.get().getAttributes()); + segmentRepo = RemoteStoreNodeAttribute.getSegmentRepoName(remoteNode.get().getAttributes()); if (segmentRepo != null && translogRepo != null) { settingsBuilder.put(SETTING_REMOTE_STORE_ENABLED, true) .put(SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, segmentRepo) diff --git a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java index 8c9a37a767ede..d84fb794c5e4f 100644 --- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java +++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java @@ -45,6 +45,7 @@ import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.node.Node; +import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; import java.io.IOException; import java.util.Collections; @@ -62,10 +63,9 @@ import java.util.stream.Stream; import static org.opensearch.node.NodeRoleSettings.NODE_ROLES_SETTING; -import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX; -import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY; -import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isClusterStateRepoConfigured; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRoutingTableRepoConfigured; /** * A discovery node represents a node that is part of the cluster. 
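// Illustrative sketch only — not part of this patch. The RemoteStoreNodeAttribute helpers referenced in the
// surrounding hunks (getRoutingTableRepoName, isClusterStateRepoConfigured, isSegmentRepoConfigured, ...) resolve a
// repository attribute by probing every supported node-attribute prefix, so a node may publish either
// "remote_store.*" or "remote_publication.*" keys. The class and method names below are hypothetical placeholders.
class PrefixedRepoAttributeLookupSketch {
    static final java.util.List<String> PREFIXES = java.util.List.of("remote_store", "remote_publication");

    // Returns the configured repository name for the given suffix (e.g. ".routing_table.repository"),
    // or null when no supported prefix carries that attribute.
    static String resolveRepoName(java.util.Map<String, String> nodeAttributes, String suffix) {
        for (String prefix : PREFIXES) {
            String value = nodeAttributes.get(prefix + suffix);
            if (value != null) {
                return value; // the first prefix that defines the attribute wins
            }
        }
        return null;
    }
}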
@@ -510,8 +510,7 @@ public boolean isSearchNode() { * @return true if the node contains remote store node attributes, false otherwise */ public boolean isRemoteStoreNode() { - return this.getAttributes().keySet().stream().anyMatch(key -> key.equals(REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY)) - && this.getAttributes().keySet().stream().anyMatch(key -> key.equals(REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY)); + return isClusterStateRepoConfigured(this.getAttributes()) && RemoteStoreNodeAttribute.isSegmentRepoConfigured(this.getAttributes()); } /** @@ -519,11 +518,7 @@ public boolean isRemoteStoreNode() { * @return true if the node contains remote cluster state node attribute and remote routing table node attribute */ public boolean isRemoteStatePublicationEnabled() { - return this.getAttributes() - .keySet() - .stream() - .anyMatch(key -> (key.equals(REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY))) - && this.getAttributes().keySet().stream().anyMatch(key -> key.equals(REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY)); + return isClusterStateRepoConfigured(this.getAttributes()) && isRoutingTableRepoConfigured(this.getAttributes()); } /** @@ -587,13 +582,16 @@ public String toString() { sb.append('}'); } if (!attributes.isEmpty()) { - sb.append( - attributes.entrySet() - .stream() - .filter(entry -> !entry.getKey().startsWith(REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX)) // filter remote_store attributes - // from logging to reduce noise. - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)) - ); + sb.append(attributes.entrySet().stream().filter(entry -> { + for (String prefix : REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX) { + if (entry.getKey().startsWith(prefix)) { + return false; + } + } + return true; + }) // filter remote_store attributes + // from logging to reduce noise. 
+ .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue))); } return sb.toString(); } diff --git a/server/src/main/java/org/opensearch/cluster/routing/remote/InternalRemoteRoutingTableService.java b/server/src/main/java/org/opensearch/cluster/routing/remote/InternalRemoteRoutingTableService.java index ea8f980c14972..eafbe05faf76f 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/remote/InternalRemoteRoutingTableService.java +++ b/server/src/main/java/org/opensearch/cluster/routing/remote/InternalRemoteRoutingTableService.java @@ -33,7 +33,6 @@ import org.opensearch.gateway.remote.routingtable.RemoteIndexRoutingTable; import org.opensearch.gateway.remote.routingtable.RemoteRoutingTableDiff; import org.opensearch.index.translog.transfer.BlobStoreTransferService; -import org.opensearch.node.Node; import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; @@ -235,9 +234,7 @@ protected void doClose() throws IOException { @Override protected void doStart() { assert isRemoteRoutingTableConfigured(settings) == true : "Remote routing table is not enabled"; - final String remoteStoreRepo = settings.get( - Node.NODE_ATTRIBUTES.getKey() + RemoteStoreNodeAttribute.REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY - ); + final String remoteStoreRepo = RemoteStoreNodeAttribute.getRoutingTableRepoName(settings); assert remoteStoreRepo != null : "Remote routing table repository is not configured"; final Repository repository = repositoriesService.get().repository(remoteStoreRepo); assert repository instanceof BlobStoreRepository : "Repository should be instance of BlobStoreRepository"; diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java index dc41189afc3cb..e4f4bae9bef7c 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java @@ -61,7 +61,6 @@ import org.opensearch.gateway.remote.model.RemoteTransientSettingsMetadata; import org.opensearch.gateway.remote.routingtable.RemoteRoutingTableDiff; import org.opensearch.index.translog.transfer.BlobStoreTransferService; -import org.opensearch.node.Node; import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; @@ -1065,9 +1064,8 @@ public void close() throws IOException { public void start() { assert isRemoteClusterStateConfigured(settings) == true : "Remote cluster state is not enabled"; - final String remoteStoreRepo = settings.get( - Node.NODE_ATTRIBUTES.getKey() + RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY - ); + final String remoteStoreRepo = RemoteStoreNodeAttribute.getClusterStateRepoName(settings); + assert remoteStoreRepo != null : "Remote Cluster State repository is not configured"; final Repository repository = repositoriesService.get().repository(remoteStoreRepo); assert repository instanceof BlobStoreRepository : "Repository should be instance of BlobStoreRepository"; diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteIndexPathUploader.java b/server/src/main/java/org/opensearch/index/remote/RemoteIndexPathUploader.java index 2a76a5b966884..18b6d6184d1b0 100644 --- 
a/server/src/main/java/org/opensearch/index/remote/RemoteIndexPathUploader.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteIndexPathUploader.java @@ -26,7 +26,6 @@ import org.opensearch.gateway.remote.RemoteStateTransferException; import org.opensearch.index.remote.RemoteStoreEnums.PathType; import org.opensearch.indices.RemoteStoreSettings; -import org.opensearch.node.Node; import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; @@ -70,11 +69,6 @@ public class RemoteIndexPathUploader extends IndexMetadataUploadListener { private static final String TIMEOUT_EXCEPTION_MSG = "Timed out waiting while uploading remote index path file for indexes=%s"; private static final String UPLOAD_EXCEPTION_MSG = "Exception occurred while uploading remote index paths for indexes=%s"; - static final String TRANSLOG_REPO_NAME_KEY = Node.NODE_ATTRIBUTES.getKey() - + RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; - static final String SEGMENT_REPO_NAME_KEY = Node.NODE_ATTRIBUTES.getKey() - + RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; - private static final Logger logger = LogManager.getLogger(RemoteIndexPathUploader.class); private final Settings settings; @@ -226,9 +220,8 @@ private void writePathToRemoteStore( } } - private Repository validateAndGetRepository(String repoSetting) { - final String repo = settings.get(repoSetting); - assert repo != null : "Remote " + repoSetting + " repository is not configured"; + private Repository validateAndGetRepository(String repo) { + assert repo != null : "Remote repository is not configured"; final Repository repository = repositoriesService.get().repository(repo); assert repository instanceof BlobStoreRepository : "Repository should be instance of BlobStoreRepository"; return repository; @@ -240,15 +233,16 @@ public void start() { // If remote store data attributes are not present than we skip this. return; } - translogRepository = (BlobStoreRepository) validateAndGetRepository(TRANSLOG_REPO_NAME_KEY); - segmentRepository = (BlobStoreRepository) validateAndGetRepository(SEGMENT_REPO_NAME_KEY); + + translogRepository = (BlobStoreRepository) validateAndGetRepository(RemoteStoreNodeAttribute.getRemoteStoreTranslogRepo(settings)); + segmentRepository = (BlobStoreRepository) validateAndGetRepository(RemoteStoreNodeAttribute.getRemoteStoreSegmentRepo(settings)); } private boolean isTranslogSegmentRepoSame() { // TODO - The current comparison checks the repository name. But it is also possible that the repository are same // by attributes, but different by name. We need to handle this. 
- String translogRepoName = settings.get(TRANSLOG_REPO_NAME_KEY); - String segmentRepoName = settings.get(SEGMENT_REPO_NAME_KEY); + String translogRepoName = RemoteStoreNodeAttribute.getRemoteStoreTranslogRepo(settings); + String segmentRepoName = RemoteStoreNodeAttribute.getRemoteStoreSegmentRepo(settings); return Objects.equals(translogRepoName, segmentRepoName); } diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteMigrationIndexMetadataUpdater.java b/server/src/main/java/org/opensearch/index/remote/RemoteMigrationIndexMetadataUpdater.java index cc51fcd2f18f6..1f9ffca4460b7 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteMigrationIndexMetadataUpdater.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteMigrationIndexMetadataUpdater.java @@ -18,6 +18,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.index.remote.RemoteStoreEnums.PathType; import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; import java.util.List; import java.util.Map; @@ -30,8 +31,6 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; import static org.opensearch.index.remote.RemoteStoreUtils.determineRemoteStoreCustomMetadataDuringMigration; import static org.opensearch.index.remote.RemoteStoreUtils.getRemoteStoreRepoName; -import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; -import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; /** * Utils for checking and mutating cluster state during remote migration @@ -74,8 +73,9 @@ public void maybeAddRemoteIndexSettings(IndexMetadata.Builder indexMetadataBuild index ); Map remoteRepoNames = getRemoteStoreRepoName(discoveryNodes); - String segmentRepoName = remoteRepoNames.get(REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY); - String tlogRepoName = remoteRepoNames.get(REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY); + String segmentRepoName = RemoteStoreNodeAttribute.getSegmentRepoName(remoteRepoNames); + String tlogRepoName = RemoteStoreNodeAttribute.getTranslogRepoName(remoteRepoNames); + assert Objects.nonNull(segmentRepoName) && Objects.nonNull(tlogRepoName) : "Remote repo names cannot be null"; Settings.Builder indexSettingsBuilder = Settings.builder().put(currentIndexSettings); updateRemoteStoreSettings(indexSettingsBuilder, segmentRepoName, tlogRepoName); diff --git a/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java index b1b6259e4ca18..89d06753063b7 100644 --- a/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java +++ b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java @@ -12,12 +12,14 @@ import org.opensearch.cluster.metadata.RepositoriesMetadata; import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.collect.Tuple; import org.opensearch.common.settings.Settings; import org.opensearch.gateway.remote.RemoteClusterStateService; import org.opensearch.node.Node; import org.opensearch.repositories.blobstore.BlobStoreRepository; import java.util.ArrayList; +import java.util.Arrays; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; @@ -35,10 +37,28 @@ */ 
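// Illustrative example only — not part of this patch. With the additional "remote_publication" prefix accepted
// below, a publication-only node could be configured with node attributes along these lines (the repository
// names and filesystem paths here are hypothetical):
//
//   node.attr.remote_publication.state.repository: my-state-repo
//   node.attr.remote_publication.repository.my-state-repo.type: fs
//   node.attr.remote_publication.repository.my-state-repo.settings.location: /mnt/state-repo
//   node.attr.remote_publication.routing_table.repository: my-routing-repo
//   node.attr.remote_publication.repository.my-routing-repo.type: fs
//   node.attr.remote_publication.repository.my-routing-repo.settings.location: /mnt/routing-repo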
public class RemoteStoreNodeAttribute { - public static final String REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX = "remote_store"; + public static final List<String> REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX = List.of("remote_store", "remote_publication"); + + // TO-DO the string constants are used only for tests and can be moved to test package + public static final String REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY = "remote_store.state.repository"; public static final String REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY = "remote_store.segment.repository"; public static final String REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY = "remote_store.translog.repository"; - public static final String REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY = "remote_store.state.repository"; + public static final String REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY = "remote_store.routing_table.repository"; + + public static final List<String> REMOTE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEYS = REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX.stream() + .map(prefix -> prefix + ".state.repository") + .collect(Collectors.toList()); + + public static final List<String> REMOTE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEYS = REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX.stream() + .map(prefix -> prefix + ".routing_table.repository") + .collect(Collectors.toList()); + public static final List<String> REMOTE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEYS = REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX.stream() + .map(prefix -> prefix + ".segment.repository") + .collect(Collectors.toList()); + public static final List<String> REMOTE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEYS = REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX.stream() + .map(prefix -> prefix + ".translog.repository") + .collect(Collectors.toList()); + public static final String REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT = "remote_store.repository.%s.type"; public static final String REMOTE_STORE_REPOSITORY_CRYPTO_ATTRIBUTE_KEY_FORMAT = "remote_store.repository.%s." + CryptoMetadata.CRYPTO_METADATA_KEY; @@ -46,18 +66,19 @@ public class RemoteStoreNodeAttribute { + "." + CryptoMetadata.SETTINGS_KEY; public static final String REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX = "remote_store.repository.%s.settings."; - public static final String REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY = "remote_store.routing_table.repository"; - private final RepositoriesMetadata repositoriesMetadata; + public static final String REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT = "%s.repository.%s.type"; + public static final String REPOSITORY_CRYPTO_ATTRIBUTE_KEY_FORMAT = "%s.repository.%s." + CryptoMetadata.CRYPTO_METADATA_KEY; + public static final String REPOSITORY_CRYPTO_SETTINGS_PREFIX = REPOSITORY_CRYPTO_ATTRIBUTE_KEY_FORMAT + + "." 
+ + CryptoMetadata.SETTINGS_KEY; + public static final String REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX = "%s.repository.%s.settings."; - public static List SUPPORTED_DATA_REPO_NAME_ATTRIBUTES = List.of( - REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, - REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY - ); + private final RepositoriesMetadata repositoriesMetadata; - public static List REMOTE_CLUSTER_PUBLICATION_REPO_NAME_ATTRIBUTES = List.of( - REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, - REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY + public static List> SUPPORTED_DATA_REPO_NAME_ATTRIBUTES = Arrays.asList( + REMOTE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEYS, + REMOTE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEYS ); /** @@ -76,8 +97,17 @@ private String validateAttributeNonNull(DiscoveryNode node, String attributeKey) return attributeValue; } - private CryptoMetadata buildCryptoMetadata(DiscoveryNode node, String repositoryName) { - String metadataKey = String.format(Locale.getDefault(), REMOTE_STORE_REPOSITORY_CRYPTO_ATTRIBUTE_KEY_FORMAT, repositoryName); + private Tuple validateAttributeNonNull(DiscoveryNode node, List attributeKeys) { + Tuple attributeValue = getValue(node.getAttributes(), attributeKeys); + if (attributeValue == null || attributeValue.v1() == null || attributeValue.v1().isEmpty()) { + throw new IllegalStateException("joining node [" + node + "] doesn't have the node attribute [" + attributeKeys.get(0) + "]"); + } + + return attributeValue; + } + + private CryptoMetadata buildCryptoMetadata(DiscoveryNode node, String repositoryName, String prefix) { + String metadataKey = String.format(Locale.getDefault(), REPOSITORY_CRYPTO_ATTRIBUTE_KEY_FORMAT, prefix, repositoryName); boolean isRepoEncrypted = node.getAttributes().keySet().stream().anyMatch(key -> key.startsWith(metadataKey)); if (isRepoEncrypted == false) { return null; @@ -86,11 +116,7 @@ private CryptoMetadata buildCryptoMetadata(DiscoveryNode node, String repository String keyProviderName = validateAttributeNonNull(node, metadataKey + "." + CryptoMetadata.KEY_PROVIDER_NAME_KEY); String keyProviderType = validateAttributeNonNull(node, metadataKey + "." 
+ CryptoMetadata.KEY_PROVIDER_TYPE_KEY); - String settingsAttributeKeyPrefix = String.format( - Locale.getDefault(), - REMOTE_STORE_REPOSITORY_CRYPTO_SETTINGS_PREFIX, - repositoryName - ); + String settingsAttributeKeyPrefix = String.format(Locale.getDefault(), REPOSITORY_CRYPTO_SETTINGS_PREFIX, prefix, repositoryName); Map settingsMap = node.getAttributes() .keySet() @@ -104,10 +130,11 @@ private CryptoMetadata buildCryptoMetadata(DiscoveryNode node, String repository return new CryptoMetadata(keyProviderName, keyProviderType, settings.build()); } - private Map validateSettingsAttributesNonNull(DiscoveryNode node, String repositoryName) { + private Map validateSettingsAttributesNonNull(DiscoveryNode node, String repositoryName, String prefix) { String settingsAttributeKeyPrefix = String.format( Locale.getDefault(), - REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + prefix, repositoryName ); Map settingsMap = node.getAttributes() @@ -125,17 +152,17 @@ private Map validateSettingsAttributesNonNull(DiscoveryNode node return settingsMap; } - private RepositoryMetadata buildRepositoryMetadata(DiscoveryNode node, String name) { + private RepositoryMetadata buildRepositoryMetadata(DiscoveryNode node, String name, String prefix) { String type = validateAttributeNonNull( node, - String.format(Locale.getDefault(), REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, name) + String.format(Locale.getDefault(), REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, prefix, name) ); - Map settingsMap = validateSettingsAttributesNonNull(node, name); + Map settingsMap = validateSettingsAttributesNonNull(node, name, prefix); Settings.Builder settings = Settings.builder(); settingsMap.forEach(settings::put); - CryptoMetadata cryptoMetadata = buildCryptoMetadata(node, name); + CryptoMetadata cryptoMetadata = buildCryptoMetadata(node, name, prefix); // Repository metadata built here will always be for a system repository. 
settings.put(BlobStoreRepository.SYSTEM_REPOSITORY_SETTING.getKey(), true); @@ -144,53 +171,104 @@ private RepositoryMetadata buildRepositoryMetadata(DiscoveryNode node, String na } private RepositoriesMetadata buildRepositoriesMetadata(DiscoveryNode node) { - Set repositoryNames = getValidatedRepositoryNames(node); + Map repositoryNamesWithPrefix = getValidatedRepositoryNames(node); List repositoryMetadataList = new ArrayList<>(); - for (String repositoryName : repositoryNames) { - repositoryMetadataList.add(buildRepositoryMetadata(node, repositoryName)); + for (Map.Entry repository : repositoryNamesWithPrefix.entrySet()) { + repositoryMetadataList.add(buildRepositoryMetadata(node, repository.getKey(), repository.getValue())); } return new RepositoriesMetadata(repositoryMetadataList); } - private Set getValidatedRepositoryNames(DiscoveryNode node) { - Set repositoryNames = new HashSet<>(); - if (node.getAttributes().containsKey(REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY) - || node.getAttributes().containsKey(REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY)) { - repositoryNames.add(validateAttributeNonNull(node, REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY)); - repositoryNames.add(validateAttributeNonNull(node, REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY)); - repositoryNames.add(validateAttributeNonNull(node, REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY)); - } else if (node.getAttributes().containsKey(REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY)) { - repositoryNames.add(validateAttributeNonNull(node, REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY)); + private static Tuple getValue(Map attributes, List keys) { + for (String key : keys) { + if (attributes.containsKey(key)) { + return new Tuple<>(attributes.get(key), key); + } } - if (node.getAttributes().containsKey(REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY)) { - repositoryNames.add(validateAttributeNonNull(node, REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY)); + return null; + } + + private Map getValidatedRepositoryNames(DiscoveryNode node) { + Set> repositoryNames = new HashSet<>(); + if (containsKey(node.getAttributes(), REMOTE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEYS) + || containsKey(node.getAttributes(), REMOTE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEYS)) { + repositoryNames.add(validateAttributeNonNull(node, REMOTE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEYS)); + repositoryNames.add(validateAttributeNonNull(node, REMOTE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEYS)); + repositoryNames.add(validateAttributeNonNull(node, REMOTE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEYS)); + } else if (containsKey(node.getAttributes(), REMOTE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEYS)) { + repositoryNames.add(validateAttributeNonNull(node, REMOTE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEYS)); + } + if (containsKey(node.getAttributes(), REMOTE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEYS)) { + repositoryNames.add(validateAttributeNonNull(node, REMOTE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEYS)); } - return repositoryNames; + Map repoNamesWithPrefix = new HashMap<>(); + repositoryNames.forEach(t -> { + String[] attrKeyParts = t.v2().split("\\."); + repoNamesWithPrefix.put(t.v1(), attrKeyParts[0]); + }); + + return repoNamesWithPrefix; } public static boolean isRemoteStoreAttributePresent(Settings settings) { - return settings.getByPrefix(Node.NODE_ATTRIBUTES.getKey() + REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX).isEmpty() == false; + for (String prefix : 
REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX) { + if (settings.getByPrefix(Node.NODE_ATTRIBUTES.getKey() + prefix).isEmpty() == false) { + return true; + } + } + return false; } public static boolean isRemoteDataAttributePresent(Settings settings) { - return settings.getByPrefix(Node.NODE_ATTRIBUTES.getKey() + REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY).isEmpty() == false - || settings.getByPrefix(Node.NODE_ATTRIBUTES.getKey() + REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY).isEmpty() == false; + return isSegmentRepoConfigured(settings) || isTranslogRepoConfigured(settings); + } + + public static boolean isSegmentRepoConfigured(Settings settings) { + for (String prefix : REMOTE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEYS) { + if (settings.getByPrefix(Node.NODE_ATTRIBUTES.getKey() + prefix).isEmpty() == false) { + return true; + } + } + return false; + } + + public static boolean isTranslogRepoConfigured(Settings settings) { + for (String prefix : REMOTE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEYS) { + if (settings.getByPrefix(Node.NODE_ATTRIBUTES.getKey() + prefix).isEmpty() == false) { + return true; + } + } + return false; } public static boolean isRemoteClusterStateConfigured(Settings settings) { - return settings.getByPrefix(Node.NODE_ATTRIBUTES.getKey() + REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY) - .isEmpty() == false; + for (String prefix : REMOTE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEYS) { + if (settings.getByPrefix(Node.NODE_ATTRIBUTES.getKey() + prefix).isEmpty() == false) { + return true; + } + } + return false; } public static String getRemoteStoreSegmentRepo(Settings settings) { - return settings.get(Node.NODE_ATTRIBUTES.getKey() + RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY); + for (String prefix : REMOTE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEYS) { + if (settings.get(Node.NODE_ATTRIBUTES.getKey() + prefix) != null) { + return settings.get(Node.NODE_ATTRIBUTES.getKey() + prefix); + } + } + return null; } public static String getRemoteStoreTranslogRepo(Settings settings) { - return settings.get(Node.NODE_ATTRIBUTES.getKey() + RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY); + for (String prefix : REMOTE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEYS) { + if (settings.get(Node.NODE_ATTRIBUTES.getKey() + prefix) != null) { + return settings.get(Node.NODE_ATTRIBUTES.getKey() + prefix); + } + } + return null; } public static boolean isRemoteStoreClusterStateEnabled(Settings settings) { @@ -198,8 +276,12 @@ public static boolean isRemoteStoreClusterStateEnabled(Settings settings) { } private static boolean isRemoteRoutingTableAttributePresent(Settings settings) { - return settings.getByPrefix(Node.NODE_ATTRIBUTES.getKey() + REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY) - .isEmpty() == false; + for (String prefix : REMOTE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEYS) { + if (settings.getByPrefix(Node.NODE_ATTRIBUTES.getKey() + prefix).isEmpty() == false) { + return true; + } + } + return false; } public static boolean isRemoteRoutingTableConfigured(Settings settings) { @@ -219,21 +301,83 @@ public RepositoriesMetadata getRepositoriesMetadata() { public static Map getDataRepoNames(DiscoveryNode node) { assert remoteDataAttributesPresent(node.getAttributes()); Map dataRepoNames = new HashMap<>(); - for (String supportedRepoAttribute : SUPPORTED_DATA_REPO_NAME_ATTRIBUTES) { - dataRepoNames.put(supportedRepoAttribute, node.getAttributes().get(supportedRepoAttribute)); + for (List supportedRepoAttribute : 
SUPPORTED_DATA_REPO_NAME_ATTRIBUTES) { + Tuple value = getValue(node.getAttributes(), supportedRepoAttribute); + if (value != null && value.v1() != null) { + dataRepoNames.put(value.v2(), value.v1()); + } } return dataRepoNames; } private static boolean remoteDataAttributesPresent(Map nodeAttrs) { - for (String supportedRepoAttributes : SUPPORTED_DATA_REPO_NAME_ATTRIBUTES) { - if (nodeAttrs.get(supportedRepoAttributes) == null || nodeAttrs.get(supportedRepoAttributes).isEmpty()) { + for (List supportedRepoAttribute : SUPPORTED_DATA_REPO_NAME_ATTRIBUTES) { + Tuple value = getValue(nodeAttrs, supportedRepoAttribute); + if (value == null || value.v1() == null) { return false; } } return true; } + public static String getClusterStateRepoName(Map repos) { + return getValueFromAnyKey(repos, REMOTE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEYS); + } + + public static String getRoutingTableRepoName(Map repos) { + return getValueFromAnyKey(repos, REMOTE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEYS); + } + + public static String getSegmentRepoName(Map repos) { + return getValueFromAnyKey(repos, REMOTE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEYS); + } + + public static String getTranslogRepoName(Map repos) { + return getValueFromAnyKey(repos, REMOTE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEYS); + } + + private static String getValueFromAnyKey(Map repos, List keys) { + for (String key : keys) { + if (repos.get(key) != null) { + return repos.get(key); + } + } + return null; + } + + public static String getClusterStateRepoName(Settings settings) { + return getValueFromAnyKey(settings, REMOTE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEYS); + } + + public static String getRoutingTableRepoName(Settings settings) { + return getValueFromAnyKey(settings, REMOTE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEYS); + } + + private static String getValueFromAnyKey(Settings settings, List keys) { + for (String key : keys) { + if (settings.get(Node.NODE_ATTRIBUTES.getKey() + key) != null) { + return settings.get(Node.NODE_ATTRIBUTES.getKey() + key); + } + } + return null; + } + + public static boolean isClusterStateRepoConfigured(Map attributes) { + return containsKey(attributes, REMOTE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEYS); + } + + public static boolean isRoutingTableRepoConfigured(Map attributes) { + return containsKey(attributes, REMOTE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEYS); + } + + public static boolean isSegmentRepoConfigured(Map attributes) { + return containsKey(attributes, REMOTE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEYS); + } + + private static boolean containsKey(Map attributes, List keys) { + return keys.stream().filter(k -> attributes.containsKey(k)).findFirst().isPresent(); + } + @Override public int hashCode() { // The hashCode is generated by computing the hash of all the repositoryMetadata present in diff --git a/server/src/main/java/org/opensearch/node/remotestore/RemoteStorePinnedTimestampService.java b/server/src/main/java/org/opensearch/node/remotestore/RemoteStorePinnedTimestampService.java index 98fcad0e6c496..f5b372ddd9b80 100644 --- a/server/src/main/java/org/opensearch/node/remotestore/RemoteStorePinnedTimestampService.java +++ b/server/src/main/java/org/opensearch/node/remotestore/RemoteStorePinnedTimestampService.java @@ -21,7 +21,6 @@ import org.opensearch.common.util.concurrent.AbstractAsyncTask; import org.opensearch.core.action.ActionListener; import org.opensearch.indices.RemoteStoreSettings; -import org.opensearch.node.Node; import org.opensearch.repositories.RepositoriesService; 
import org.opensearch.repositories.Repository; import org.opensearch.repositories.blobstore.BlobStoreRepository; @@ -42,6 +41,8 @@ import java.util.function.Supplier; import java.util.stream.Collectors; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.getRemoteStoreSegmentRepo; + /** * Service for managing pinned timestamps in a remote store. * This service handles pinning and unpinning of timestamps, as well as periodic updates of the pinned timestamps set. @@ -86,9 +87,7 @@ public void start() { } private static BlobContainer validateAndCreateBlobContainer(Settings settings, RepositoriesService repositoriesService) { - final String remoteStoreRepo = settings.get( - Node.NODE_ATTRIBUTES.getKey() + RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY - ); + final String remoteStoreRepo = getRemoteStoreSegmentRepo(settings); assert remoteStoreRepo != null : "Remote Segment Store repository is not configured"; final Repository repository = repositoriesService.repository(remoteStoreRepo); assert repository instanceof BlobStoreRepository : "Repository should be instance of BlobStoreRepository"; diff --git a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java index 525a53f3e6158..6550ed39e8042 100644 --- a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java +++ b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java @@ -99,7 +99,7 @@ public void testRemoteStoreRedactionInToString() { roles, Version.CURRENT ); - assertFalse(node.toString().contains(RemoteStoreNodeAttribute.REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX)); + assertFalse(node.toString().contains(RemoteStoreNodeAttribute.REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX.get(0))); } public void testDiscoveryNodeIsCreatedWithHostFromInetAddress() throws Exception { diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteIndexPathUploaderTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteIndexPathUploaderTests.java index d6519d9db8ee6..2e6523a4a64a0 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteIndexPathUploaderTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteIndexPathUploaderTests.java @@ -80,11 +80,16 @@ public class RemoteIndexPathUploaderTests extends OpenSearchTestCase { private final AtomicLong successCount = new AtomicLong(); private final AtomicLong failureCount = new AtomicLong(); + static final String TRANSLOG_REPO_NAME_KEY = Node.NODE_ATTRIBUTES.getKey() + + RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; + static final String SEGMENT_REPO_NAME_KEY = Node.NODE_ATTRIBUTES.getKey() + + RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; + @Before public void setup() { settings = Settings.builder() - .put(RemoteIndexPathUploader.TRANSLOG_REPO_NAME_KEY, TRANSLOG_REPO_NAME) - .put(RemoteIndexPathUploader.SEGMENT_REPO_NAME_KEY, TRANSLOG_REPO_NAME) + .put(TRANSLOG_REPO_NAME_KEY, TRANSLOG_REPO_NAME) + .put(SEGMENT_REPO_NAME_KEY, TRANSLOG_REPO_NAME) .put(CLUSTER_STATE_REPO_KEY, TRANSLOG_REPO_NAME) .put(RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true) .build(); @@ -247,10 +252,7 @@ public void testInterceptWithSameRepo() throws IOException { } public void testInterceptWithDifferentRepo() throws IOException { - Settings settings = Settings.builder() - .put(this.settings) - 
.put(RemoteIndexPathUploader.SEGMENT_REPO_NAME_KEY, SEGMENT_REPO_NAME) - .build(); + Settings settings = Settings.builder().put(this.settings).put(SEGMENT_REPO_NAME_KEY, SEGMENT_REPO_NAME).build(); when(repositoriesService.repository(SEGMENT_REPO_NAME)).thenReturn(repository); RemoteIndexPathUploader remoteIndexPathUploader = new RemoteIndexPathUploader( threadPool, diff --git a/server/src/test/java/org/opensearch/node/RemoteStoreNodeAttributeTests.java b/server/src/test/java/org/opensearch/node/RemoteStoreNodeAttributeTests.java index de7f8977686a7..537a5a5739b75 100644 --- a/server/src/test/java/org/opensearch/node/RemoteStoreNodeAttributeTests.java +++ b/server/src/test/java/org/opensearch/node/RemoteStoreNodeAttributeTests.java @@ -32,12 +32,66 @@ import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REPOSITORY_CRYPTO_ATTRIBUTE_KEY_FORMAT; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REPOSITORY_CRYPTO_SETTINGS_PREFIX; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; +import static org.opensearch.test.RemoteStoreAttributeConstants.REMOTE_PUBLICATION_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.test.RemoteStoreAttributeConstants.REMOTE_PUBLICATION_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.test.RemoteStoreAttributeConstants.REMOTE_PUBLICATION_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; public class RemoteStoreNodeAttributeTests extends OpenSearchTestCase { static private final String KEY_ARN = "arn:aws:kms:us-east-1:123456789:key/6e9aa906-2cc3-4924-8ded-f385c78d9dcf"; static private final String REGION = "us-east-1"; + public void testCryptoMetadataForPublication() throws UnknownHostException { + String repoName = "remote-store-A"; + String prefix = "remote_publication"; + String repoTypeSettingKey = String.format(Locale.ROOT, REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, prefix, repoName); + String repoSettingsKey = String.format(Locale.ROOT, REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, prefix, repoName); + String repoCryptoMetadataKey = String.format(Locale.ROOT, REPOSITORY_CRYPTO_ATTRIBUTE_KEY_FORMAT, prefix, repoName); + String repoCryptoMetadataSettingsKey = String.format(Locale.ROOT, REPOSITORY_CRYPTO_SETTINGS_PREFIX, prefix, repoName); + Map attr = Map.of( + REMOTE_PUBLICATION_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, + repoName, + REMOTE_PUBLICATION_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, + repoName, + REMOTE_PUBLICATION_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, + repoName, + repoTypeSettingKey, + "s3", + repoSettingsKey, + "abc", + repoSettingsKey + "base_path", + "xyz", + repoCryptoMetadataKey + ".key_provider_name", + "store-test", + repoCryptoMetadataKey + ".key_provider_type", + "aws-kms", + repoCryptoMetadataSettingsKey + ".region", + REGION, + repoCryptoMetadataSettingsKey + ".key_arn", + KEY_ARN + ); + DiscoveryNode node = new DiscoveryNode( + "C", + new TransportAddress(InetAddress.getByName("localhost"), 9876), + attr, + emptySet(), + 
Version.CURRENT + ); + + RemoteStoreNodeAttribute remoteStoreNodeAttribute = new RemoteStoreNodeAttribute(node); + assertEquals(remoteStoreNodeAttribute.getRepositoriesMetadata().repositories().size(), 1); + RepositoryMetadata repositoryMetadata = remoteStoreNodeAttribute.getRepositoriesMetadata().repositories().get(0); + Settings.Builder settings = Settings.builder(); + settings.put("region", REGION); + settings.put("key_arn", KEY_ARN); + CryptoMetadata cryptoMetadata = new CryptoMetadata("store-test", "aws-kms", settings.build()); + assertEquals(cryptoMetadata, repositoryMetadata.cryptoMetadata()); + } + public void testCryptoMetadata() throws UnknownHostException { String repoName = "remote-store-A"; String repoTypeSettingKey = String.format(Locale.ROOT, REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, repoName); diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index e27ff311c06f6..1ee856d3092f0 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -2838,7 +2838,7 @@ private static Settings buildRemoteStoreNodeAttributes( ); } - private static Settings buildRemoteStoreNodeAttributes( + protected static Settings buildRemoteStoreNodeAttributes( String segmentRepoName, Path segmentRepoPath, String segmentRepoType, diff --git a/test/framework/src/main/java/org/opensearch/test/RemoteStoreAttributeConstants.java b/test/framework/src/main/java/org/opensearch/test/RemoteStoreAttributeConstants.java new file mode 100644 index 0000000000000..0e7ebb9f871f6 --- /dev/null +++ b/test/framework/src/main/java/org/opensearch/test/RemoteStoreAttributeConstants.java @@ -0,0 +1,19 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.test;
+
+public class RemoteStoreAttributeConstants {
+
+    public static final String REMOTE_PUBLICATION_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY = "remote_publication.state.repository";
+    public static final String REMOTE_PUBLICATION_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY = "remote_publication.segment.repository";
+    public static final String REMOTE_PUBLICATION_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY = "remote_publication.translog.repository";
+    public static final String REMOTE_PUBLICATION_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY =
+        "remote_publication.routing_table.repository";
+
+}

From 3f18562974612ceb36b66c773b6b8eb6d4dfc0b0 Mon Sep 17 00:00:00 2001
From: gaobinlong
Date: Thu, 14 Nov 2024 13:12:55 +0800
Subject: [PATCH 30/30] Bump google-auth-library-oauth2-http from 1.7.0 to 1.29.0 in /plugins/repository-gcs (#16520)

* Bump from 1.7.0 to 1.29.0 in /plugins/repository-gcs
Signed-off-by: Gao Binlong
* Add change log
Signed-off-by: Gao Binlong
* move version to top-level file
Signed-off-by: Gao Binlong
* Fix build failure
Signed-off-by: Gao Binlong
* Fix test failure
Signed-off-by: Gao Binlong
* Revert some change
Signed-off-by: Gao Binlong

---------

Signed-off-by: Gao Binlong
Signed-off-by: gaobinlong
---
 CHANGELOG.md | 1 +
 gradle/libs.versions.toml | 1 +
 plugins/repository-gcs/build.gradle | 12 +++++++-----
 .../google-auth-library-credentials-1.29.0.jar.sha1 | 1 +
 .../google-auth-library-credentials-1.7.0.jar.sha1 | 1 -
 .../google-auth-library-oauth2-http-1.29.0.jar.sha1 | 1 +
 .../google-auth-library-oauth2-http-1.7.0.jar.sha1 | 1 -
 .../gcs/GoogleCloudStorageServiceTests.java | 4 ++--
 8 files changed, 13 insertions(+), 9 deletions(-)
 create mode 100644 plugins/repository-gcs/licenses/google-auth-library-credentials-1.29.0.jar.sha1
 delete mode 100644 plugins/repository-gcs/licenses/google-auth-library-credentials-1.7.0.jar.sha1
 create mode 100644 plugins/repository-gcs/licenses/google-auth-library-oauth2-http-1.29.0.jar.sha1
 delete mode 100644 plugins/repository-gcs/licenses/google-auth-library-oauth2-http-1.7.0.jar.sha1

diff --git a/CHANGELOG.md b/CHANGELOG.md
index c19019ece6c5c..0a5aca03abd0c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -21,6 +21,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Add new configuration setting `synonym_analyzer`, to the `synonym` and `synonym_graph` filters, enabling the specification of a custom analyzer for reading the synonym file ([#16488](https://github.com/opensearch-project/OpenSearch/pull/16488)).

 ### Dependencies
+- Bump `google-auth-library-oauth2-http` from 1.7.0 to 1.29.0 in /plugins/repository-gcs ([#16520](https://github.com/opensearch-project/OpenSearch/pull/16520))
 - Bump `com.azure:azure-storage-common` from 12.25.1 to 12.27.1 ([#16521](https://github.com/opensearch-project/OpenSearch/pull/16521))
 - Bump `com.google.apis:google-api-services-compute` from v1-rev20240407-2.0.0 to v1-rev20241021-2.0.0 ([#16502](https://github.com/opensearch-project/OpenSearch/pull/16502), [#16548](https://github.com/opensearch-project/OpenSearch/pull/16548))
 - Bump `com.azure:azure-storage-blob` from 12.23.0 to 12.28.1 ([#16501](https://github.com/opensearch-project/OpenSearch/pull/16501))
diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml
index 8ff3f6e45397d..6b5c10e071d1e 100644
--- a/gradle/libs.versions.toml
+++ b/gradle/libs.versions.toml
@@ -24,6 +24,7 @@ guava = "32.1.1-jre"
 protobuf = "3.25.5"
 jakarta_annotation = "1.3.5"
 google_http_client = "1.44.1"
+google_auth = "1.29.0"
 tdigest = "3.3"
 hdrhistogram = "2.2.2"
 grpc = "1.68.0"
diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle
index b90bcc7f822d1..a44f6643c086a 100644
--- a/plugins/repository-gcs/build.gradle
+++ b/plugins/repository-gcs/build.gradle
@@ -47,10 +47,6 @@ opensearchplugin {
   classname 'org.opensearch.repositories.gcs.GoogleCloudStoragePlugin'
 }

-versions << [
-  'google_auth': '1.7.0'
-]
-
 dependencies {
   api 'com.google.api:api-common:1.8.1'
   api 'com.google.api:gax:2.35.0'
@@ -148,7 +144,6 @@ thirdPartyAudit {
     'com.google.appengine.api.urlfetch.HTTPResponse',
     'com.google.appengine.api.urlfetch.URLFetchService',
     'com.google.appengine.api.urlfetch.URLFetchServiceFactory',
-    'com.google.auth.oauth2.GdchCredentials',
     'com.google.protobuf.util.JsonFormat',
     'com.google.protobuf.util.JsonFormat$Parser',
     'com.google.protobuf.util.JsonFormat$Printer',
@@ -304,6 +299,13 @@ testClusters {
   all testClustersConfiguration
 }

+/**
+ * Used for testing getting credentials from GCE
+ */
+test {
+  environment 'NO_GCE_CHECK', 'true'
+}
+
 /*
  * We only use a small amount of data in these tests, which means that the resumable upload path is not tested. We add
  * an additional test that forces the large blob threshold to be small to exercise the resumable upload path.
diff --git a/plugins/repository-gcs/licenses/google-auth-library-credentials-1.29.0.jar.sha1 b/plugins/repository-gcs/licenses/google-auth-library-credentials-1.29.0.jar.sha1
new file mode 100644
index 0000000000000..e2f931a1e876f
--- /dev/null
+++ b/plugins/repository-gcs/licenses/google-auth-library-credentials-1.29.0.jar.sha1
@@ -0,0 +1 @@
+19af4907301816d9328c1eb1fcc6dd05c8a0b544
\ No newline at end of file
diff --git a/plugins/repository-gcs/licenses/google-auth-library-credentials-1.7.0.jar.sha1 b/plugins/repository-gcs/licenses/google-auth-library-credentials-1.7.0.jar.sha1
deleted file mode 100644
index f2e9a4f7283bf..0000000000000
--- a/plugins/repository-gcs/licenses/google-auth-library-credentials-1.7.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-b29af5a9ea94e9e7f86bded11e39f5afda5b17e8
\ No newline at end of file
diff --git a/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-1.29.0.jar.sha1 b/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-1.29.0.jar.sha1
new file mode 100644
index 0000000000000..98d0d1beda43d
--- /dev/null
+++ b/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-1.29.0.jar.sha1
@@ -0,0 +1 @@
+2a42aead6cdc5d2cd22cdda1b9d7922e6135240f
\ No newline at end of file
diff --git a/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-1.7.0.jar.sha1 b/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-1.7.0.jar.sha1
deleted file mode 100644
index 738645d6b8c7b..0000000000000
--- a/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-1.7.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-985d183303dbd4b7ceb348056e41e59677f6f74f
\ No newline at end of file
diff --git a/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageServiceTests.java b/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageServiceTests.java
index 58e412684ed5a..b620f212df413 100644
--- a/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageServiceTests.java
+++ b/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageServiceTests.java
@@ -242,7 +242,7 @@ public void testApplicationDefaultCredentialsWhenNoSettingProvided() throws Exce
         Exception exception = assertThrows(IOException.class, GoogleCredentials::getApplicationDefault);
         assertNotNull(storageOptions);
         assertNull(storageOptions.getCredentials());
-        MatcherAssert.assertThat(exception.getMessage(), containsString("The Application Default Credentials are not available"));
+        MatcherAssert.assertThat(exception.getMessage(), containsString("Your default credentials were not found"));
     }

     /**
@@ -254,7 +254,7 @@ public void testDefaultCredentialsThrowsExceptionWithoutGCStorageService() {
         GoogleCredentials credentials = googleApplicationDefaultCredentials.get();
         assertNull(credentials);
         Exception exception = assertThrows(IOException.class, GoogleCredentials::getApplicationDefault);
-        MatcherAssert.assertThat(exception.getMessage(), containsString("The Application Default Credentials are not available"));
+        MatcherAssert.assertThat(exception.getMessage(), containsString("Your default credentials were not found"));
     }

     /**