diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index bb12121cd3d8f..40c8fb2e01dd7 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -11,7 +11,7 @@
 # 3. Use the command palette to run the CODEOWNERS: Show owners of current file command, which will display all code owners for the current file.

 # Default ownership for all repo files
-* @abbashus @adnapibar @anasalkouz @andrross @Bukhtawar @CEHENKLE @dblock @dbwiddis @dreamer-89 @gbbafna @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @ryanbogan @sachinpkale @saratvemulapalli @setiah @shwetathareja @sohami @tlfeng @VachaShah
+* @anasalkouz @andrross @Bukhtawar @CEHENKLE @dblock @dbwiddis @dreamer-89 @gbbafna @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @tlfeng @VachaShah

 /modules/transport-netty4/ @peternied

@@ -24,4 +24,4 @@

 /.github/ @peternied

-/MAINTAINERS.md @abbashus @adnapibar @anasalkouz @andrross @Bukhtawar @CEHENKLE @dblock @dbwiddis @dreamer-89 @gbbafna @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @peternied @reta @Rishikesh1159 @ryanbogan @sachinpkale @saratvemulapalli @setiah @shwetathareja @sohami @tlfeng @VachaShah
+/MAINTAINERS.md @anasalkouz @andrross @Bukhtawar @CEHENKLE @dblock @dbwiddis @dreamer-89 @gbbafna @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @peternied @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @tlfeng @VachaShah
diff --git a/.github/workflows/changelog_verifier.yml b/.github/workflows/changelog_verifier.yml
index 04e2ed5006269..cf9343c2c3aac 100644
--- a/.github/workflows/changelog_verifier.yml
+++ b/.github/workflows/changelog_verifier.yml
@@ -26,6 +26,19 @@ jobs:
           changeLogPath: 'CHANGELOG.md'
         continue-on-error: true
       - run: |
+          # The check was possibly skipped leading to success for both the jobs
+          if [[ ${{ steps.verify-changelog-3x.outcome }} == 'success' && ${{ steps.verify-changelog.outcome }} == 'success' ]]; then
+            exit 0
+          fi
+          if [[ ${{ steps.verify-changelog-3x.outcome }} == 'failure' && ${{ steps.verify-changelog.outcome }} == 'failure' ]]; then
+            echo "error: Please ensure a changelog entry exists in CHANGELOG.md or CHANGELOG-3.0.md"
+            exit 1
+          fi
+
+          # Concatenates the labels and checks if the string contains "backport"
+          has_backport_label=${{ contains(join(github.event.pull_request.labels.*.name, ', '), 'backport')}}
+          if [[ ${{ steps.verify-changelog.outcome }} == 'success' && $has_backport_label == false ]]; then
+            echo "error: Please make sure that the PR has a backport label associated with it when making an entry to the CHANGELOG.md file"
            exit 1
          fi
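The added step encodes a small decision table over the two verifier outcomes and the PR labels. A minimal Java sketch of that table (names hypothetical; the workflow itself reads step outcomes and labels, not booleans):

```java
final class ChangelogGateSketch {
    private ChangelogGateSketch() {}

    // Mirrors the shell above: pass if both checks passed (possibly both were
    // skipped), fail if neither changelog has an entry, and require a backport
    // label when only CHANGELOG.md was updated.
    static boolean passes(boolean mainChangelogOk, boolean threeXChangelogOk, boolean hasBackportLabel) {
        if (mainChangelogOk && threeXChangelogOk) {
            return true;
        }
        if (!mainChangelogOk && !threeXChangelogOk) {
            return false; // no entry in CHANGELOG.md or CHANGELOG-3.0.md
        }
        if (mainChangelogOk && !hasBackportLabel) {
            return false; // a CHANGELOG.md entry must come with a backport label
        }
        return true;
    }
}
```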
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 68a2c746ec89b..a2d76b4a703f0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -21,6 +21,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - [Tiered Caching] Add dimension-based stats to ICache implementations. ([#12531](https://github.com/opensearch-project/OpenSearch/pull/12531))
 - Add changes for overriding remote store and replication settings during snapshot restore. ([#11868](https://github.com/opensearch-project/OpenSearch/pull/11868))
 - Add an individual setting of rate limiter for segment replication ([#12959](https://github.com/opensearch-project/OpenSearch/pull/12959))
+- [Streaming Indexing] Ensure support of the new transport by security plugin ([#13174](https://github.com/opensearch-project/OpenSearch/pull/13174))
+- Add cluster setting to dynamically configure the buckets for filter rewrite optimization. ([#13179](https://github.com/opensearch-project/OpenSearch/pull/13179))

 ### Dependencies
 - Bump `org.apache.commons:commons-configuration2` from 2.10.0 to 2.10.1 ([#12896](https://github.com/opensearch-project/OpenSearch/pull/12896))
@@ -32,8 +34,9 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Bump `org.apache.commons:commonscodec` from 1.15 to 1.16.1 ([#12627](https://github.com/opensearch-project/OpenSearch/pull/12627))
 - Bump `org.apache.commons:commonslang` from 3.13.0 to 3.14.0 ([#12627](https://github.com/opensearch-project/OpenSearch/pull/12627))
 - Bump Apache Tika from 2.6.0 to 2.9.2 ([#12627](https://github.com/opensearch-project/OpenSearch/pull/12627))
-- Bump `com.gradle.enterprise` from 3.16.2 to 3.17 ([#13116](https://github.com/opensearch-project/OpenSearch/pull/13116))
+- Bump `com.gradle.enterprise` from 3.16.2 to 3.17.1 ([#13116](https://github.com/opensearch-project/OpenSearch/pull/13116), [#13191](https://github.com/opensearch-project/OpenSearch/pull/13191))
 - Bump `gradle/wrapper-validation-action` from 2 to 3 ([#13192](https://github.com/opensearch-project/OpenSearch/pull/13192))
+- Bump joda from 2.12.2 to 2.12.7 ([#13193](https://github.com/opensearch-project/OpenSearch/pull/13193))

 ### Changed
 - [BWC and API enforcement] Enforcing the presence of API annotations at build time ([#12872](https://github.com/opensearch-project/OpenSearch/pull/12872))
diff --git a/MAINTAINERS.md b/MAINTAINERS.md
index 5535c2fa26eae..cce92167473b6 100644
--- a/MAINTAINERS.md
+++ b/MAINTAINERS.md
@@ -5,8 +5,7 @@ This document contains a list of maintainers in this repo. See [opensearch-proje
 ## Current Maintainers

 | Maintainer               | GitHub ID                                                | Affiliation |
-|--------------------------| ------------------------------------------------------- | ----------- |
-| Abbas Hussain            | [abbashus](https://github.com/abbashus)                  | Meta        |
+| ------------------------ | ------------------------------------------------------- | ----------- |
 | Anas Alkouz              | [anasalkouz](https://github.com/anasalkouz)              | Amazon      |
 | Andrew Ross              | [andrross](https://github.com/andrross)                  | Amazon      |
 | Andriy Redko             | [reta](https://github.com/reta)                          | Aiven       |
@@ -15,16 +14,13 @@ This document contains a list of maintainers in this repo. See [opensearch-proje
 | Dan Widdis               | [dbwiddis](https://github.com/dbwiddis)                  | Amazon      |
 | Daniel "dB." Doubrovkine | [dblock](https://github.com/dblock)                      | Amazon      |
 | Gaurav Bafna             | [gbbafna](https://github.com/gbbafna)                    | Amazon      |
-| Himanshu Setia           | [setiah](https://github.com/setiah)                      | Amazon      |
 | Kunal Kotwani            | [kotwanikunal](https://github.com/kotwanikunal)          | Amazon      |
 | Marc Handalian           | [mch2](https://github.com/mch2)                          | Amazon      |
 | Michael Froh             | [msfroh](https://github.com/msfroh)                      | Amazon      |
 | Nick Knize               | [nknize](https://github.com/nknize)                      | Amazon      |
 | Owais Kazi               | [owaiskazi19](https://github.com/owaiskazi19)            | Amazon      |
 | Peter Nied               | [peternied](https://github.com/peternied)                | Amazon      |
-| Rabi Panda               | [adnapibar](https://github.com/adnapibar)                | Independent |
 | Rishikesh Pasham         | [Rishikesh1159](https://github.com/Rishikesh1159)        | Amazon      |
-| Ryan Bogan               | [ryanbogan](https://github.com/ryanbogan)                | Amazon      |
 | Sachin Kale              | [sachinpkale](https://github.com/sachinpkale)            | Amazon      |
 | Sarat Vemulapalli        | [saratvemulapalli](https://github.com/saratvemulapalli)  | Amazon      |
 | Shweta Thareja           | [shwetathareja](https://github.com/shwetathareja)        | Amazon      |
@@ -35,8 +31,12 @@ This document contains a list of maintainers in this repo. See [opensearch-proje

 ## Emeritus

-| Maintainer              | GitHub ID                                    | Affiliation |
-|-------------------------|----------------------------------------------|-------------|
-| Megha Sai Kavikondala   | [meghasaik](https://github.com/meghasaik)    | Amazon      |
-| Xue Zhou                | [xuezhou25](https://github.com/xuezhou25)    | Amazon      |
-| Kartik Ganesh           | [kartg](https://github.com/kartg)            | Amazon      |
+| Maintainer            | GitHub ID                                  | Affiliation |
+| --------------------- | ------------------------------------------ | ----------- |
+| Megha Sai Kavikondala | [meghasaik](https://github.com/meghasaik)  | Amazon      |
+| Xue Zhou              | [xuezhou25](https://github.com/xuezhou25)  | Amazon      |
+| Kartik Ganesh         | [kartg](https://github.com/kartg)          | Amazon      |
+| Abbas Hussain         | [abbashus](https://github.com/abbashus)    | Meta        |
+| Himanshu Setia        | [setiah](https://github.com/setiah)        | Amazon      |
+| Ryan Bogan            | [ryanbogan](https://github.com/ryanbogan)  | Amazon      |
+| Rabi Panda            | [adnapibar](https://github.com/adnapibar)  | Independent |
diff --git a/TRIAGING.md b/TRIAGING.md
index 3917f1e1442b9..bb04d49a66c54 100644
--- a/TRIAGING.md
+++ b/TRIAGING.md
@@ -14,9 +14,9 @@ Each meeting we seek to address all new issues. However, should we run out of ti
 ### How do I join the Triage meeting?

-Meetings are hosted regularly at 10:00a - 10:55a Central Time every Wednesday and can be joined via [Chime](https://aws.amazon.com/chime/), with this [meeting link](https://chime.aws/1988437365).
+Meetings are hosted regularly at 10:00a - 10:55a Central Time every Wednesday and can be joined via [Zoom](https://zoom.us/download), with this [meeting link](https://us02web.zoom.us/j/86287450465) and passcode `805212`.

-After joining the Chime meeting, you can enable your video / voice to join the discussion. If you do not have a webcam or microphone available, you can still join in via the text chat.
+After joining the Zoom meeting, you can enable your video / voice to join the discussion. If you do not have a webcam or microphone available, you can still join in via the text chat.

 If you have an issue you'd like to bring forth please prepare a link to the issue so it can be presented and viewed by everyone in the meeting.
diff --git a/buildSrc/version.properties b/buildSrc/version.properties
index c60f42dc1bb89..6e0d538460987 100644
--- a/buildSrc/version.properties
+++ b/buildSrc/version.properties
@@ -1,5 +1,5 @@
 opensearch = 3.0.0
-lucene = 9.11.0-snapshot-8a555eb
+lucene = 9.11.0-snapshot-fb97840

 bundled_jdk_vendor = adoptium
 bundled_jdk = 21.0.2+13
@@ -27,7 +27,7 @@ jakarta_annotation = 1.3.5
 jna = 5.13.0

 netty = 4.1.108.Final
-joda = 2.12.2
+joda = 2.12.7

 # project reactor
 reactor_netty = 1.1.17
diff --git a/doc-tools/missing-doclet/src/main/java/org/opensearch/missingdoclet/MissingDoclet.java b/doc-tools/missing-doclet/src/main/java/org/opensearch/missingdoclet/MissingDoclet.java
index e1ad55fe4b60b..470ab75b7930c 100644
--- a/doc-tools/missing-doclet/src/main/java/org/opensearch/missingdoclet/MissingDoclet.java
+++ b/doc-tools/missing-doclet/src/main/java/org/opensearch/missingdoclet/MissingDoclet.java
@@ -332,13 +332,20 @@ private void checkComment(Element element) {
     // Ignore classes annotated with @Generated and all enclosed elements in them.
     private boolean isGenerated(Element element) {
-        return element
+        final boolean isGenerated = element
             .getAnnotationMirrors()
             .stream()
             .anyMatch(m -> m
                 .getAnnotationType()
                 .toString() /* ClassSymbol.toString() returns class name */
                 .equalsIgnoreCase("javax.annotation.Generated"));
+
+        if (!isGenerated && element.getEnclosingElement() != null) {
+            // check if enclosing element is generated
+            return isGenerated(element.getEnclosingElement());
+        }
+
+        return isGenerated;
     }

     private boolean hasInheritedJavadocs(Element element) {
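The `isGenerated` change makes the `@Generated` exemption transitive through `getEnclosingElement()`. A hypothetical input illustrating the new behavior:

```java
import javax.annotation.Generated;

// Illustrative only: before this change, the doclet exempted GeneratedParent
// itself but still flagged its nested members for missing Javadoc. With the
// recursive walk over enclosing elements, the whole enclosed tree is skipped.
@Generated("some-code-generator")
public class GeneratedParent {

    public static class Inner { // previously flagged, now exempt

        public void helper() {} // likewise exempt via the recursion
    }
}
```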
diff --git a/libs/core/licenses/lucene-core-9.11.0-snapshot-8a555eb.jar.sha1 b/libs/core/licenses/lucene-core-9.11.0-snapshot-8a555eb.jar.sha1
deleted file mode 100644
index 25beb34873c0c..0000000000000
--- a/libs/core/licenses/lucene-core-9.11.0-snapshot-8a555eb.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-fe1cf5663be8bdb6aa757fd4101bc551684c90fb
\ No newline at end of file
diff --git a/libs/core/licenses/lucene-core-9.11.0-snapshot-fb97840.jar.sha1 b/libs/core/licenses/lucene-core-9.11.0-snapshot-fb97840.jar.sha1
new file mode 100644
index 0000000000000..88309bc46411a
--- /dev/null
+++ b/libs/core/licenses/lucene-core-9.11.0-snapshot-fb97840.jar.sha1
@@ -0,0 +1 @@
+53a828e3e88f55c83979cd3df0704617cc9edb9a
\ No newline at end of file
diff --git a/modules/lang-expression/licenses/lucene-expressions-9.11.0-snapshot-8a555eb.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.11.0-snapshot-8a555eb.jar.sha1
deleted file mode 100644
index 82a17e2b79290..0000000000000
--- a/modules/lang-expression/licenses/lucene-expressions-9.11.0-snapshot-8a555eb.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-00759eaff8f62b38ba66a05f26ab784c268908d3
\ No newline at end of file
diff --git a/modules/lang-expression/licenses/lucene-expressions-9.11.0-snapshot-fb97840.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.11.0-snapshot-fb97840.jar.sha1
new file mode 100644
index 0000000000000..f4da6e39aeeb8
--- /dev/null
+++ b/modules/lang-expression/licenses/lucene-expressions-9.11.0-snapshot-fb97840.jar.sha1
@@ -0,0 +1 @@
+ab914b48665f484315b78e4b6787aa42f5966bb6
\ No newline at end of file
diff --git a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpClient.java b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpClient.java
index 7cc1a47a5d2a4..ef6b67ea44299 100644
--- a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpClient.java
+++ b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpClient.java
@@ -38,7 +38,6 @@
 import org.opensearch.core.common.unit.ByteSizeValue;
 import org.opensearch.tasks.Task;
 import org.opensearch.transport.NettyAllocator;
-import org.opensearch.transport.netty4.ssl.TrustAllManager;

 import java.io.Closeable;
 import java.net.SocketAddress;
@@ -90,6 +89,7 @@
 import io.netty.handler.ssl.ClientAuth;
 import io.netty.handler.ssl.SslContextBuilder;
 import io.netty.handler.ssl.SslHandler;
+import io.netty.handler.ssl.util.InsecureTrustManagerFactory;
 import io.netty.util.AttributeKey;

 import static io.netty.handler.codec.http.HttpHeaderNames.HOST;
@@ -270,7 +270,7 @@ protected void initChannel(SocketChannel ch) throws Exception {
                 final SslHandler sslHandler = new SslHandler(
                     SslContextBuilder.forClient()
                         .clientAuth(ClientAuth.NONE)
-                        .trustManager(TrustAllManager.INSTANCE)
+                        .trustManager(InsecureTrustManagerFactory.INSTANCE)
                         .build()
                         .newEngine(ch.alloc())
                 );
diff --git a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransportTests.java b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransportTests.java
index e79a066ad8f63..f80ad901ce765 100644
--- a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransportTests.java
+++ b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransportTests.java
@@ -41,7 +41,6 @@
 import org.opensearch.threadpool.ThreadPool;
 import org.opensearch.transport.NettyAllocator;
 import org.opensearch.transport.SharedGroupFactory;
-import org.opensearch.transport.netty4.ssl.TrustAllManager;
 import org.junit.After;
 import org.junit.Before;

@@ -84,6 +83,7 @@
 import io.netty.handler.codec.http.HttpUtil;
 import io.netty.handler.codec.http.HttpVersion;
 import io.netty.handler.ssl.SslContextBuilder;
+import io.netty.handler.ssl.util.InsecureTrustManagerFactory;

 import static org.opensearch.core.rest.RestStatus.BAD_REQUEST;
 import static org.opensearch.core.rest.RestStatus.OK;
@@ -131,7 +131,7 @@ public Optional buildSecureHttpServerEngine(Settings settings, HttpSe
             keyManagerFactory.init(keyStore, "password".toCharArray());

             SSLEngine engine = SslContextBuilder.forServer(keyManagerFactory)
-                .trustManager(TrustAllManager.INSTANCE)
+                .trustManager(InsecureTrustManagerFactory.INSTANCE)
                 .build()
                 .newEngine(NettyAllocator.getAllocator());
             return Optional.of(engine);
diff --git a/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/ssl/SimpleSecureNetty4TransportTests.java b/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/ssl/SimpleSecureNetty4TransportTests.java
index df3b005f40903..e0600aebd90e5 100644
--- a/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/ssl/SimpleSecureNetty4TransportTests.java
+++ b/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/ssl/SimpleSecureNetty4TransportTests.java
@@ -55,6 +55,7 @@

 import io.netty.handler.ssl.ClientAuth;
 import io.netty.handler.ssl.SslContextBuilder;
+import io.netty.handler.ssl.util.InsecureTrustManagerFactory;

 import static java.util.Collections.emptyMap;
 import static java.util.Collections.emptySet;
@@ -87,7 +88,7 @@ public Optional buildSecureServerTransportEngine(Settings settings, T
             SSLEngine engine = SslContextBuilder.forServer(keyManagerFactory)
                 .clientAuth(ClientAuth.NONE)
-                .trustManager(TrustAllManager.INSTANCE)
+                .trustManager(InsecureTrustManagerFactory.INSTANCE)
                 .build()
                 .newEngine(NettyAllocator.getAllocator());
             return Optional.of(engine);
@@ -103,7 +104,7 @@ public Optional buildSecureClientTransportEngine(Settings settings, S
             return Optional.of(
                 SslContextBuilder.forClient()
                     .clientAuth(ClientAuth.NONE)
-                    .trustManager(TrustAllManager.INSTANCE)
+                    .trustManager(InsecureTrustManagerFactory.INSTANCE)
                     .build()
                     .newEngine(NettyAllocator.getAllocator())
             );
diff --git a/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/ssl/TrustAllManager.java b/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/ssl/TrustAllManager.java
deleted file mode 100644
index a38c542b5780e..0000000000000
--- a/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/ssl/TrustAllManager.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-package org.opensearch.transport.netty4.ssl;
-
-import javax.net.ssl.X509TrustManager;
-
-import java.security.cert.CertificateException;
-import java.security.cert.X509Certificate;
-
-public class TrustAllManager implements X509TrustManager {
-    public static final X509TrustManager INSTANCE = new TrustAllManager();
-
-    private TrustAllManager() {}
-
-    public void checkClientTrusted(X509Certificate[] chain, String authType) throws CertificateException {}
-
-    public void checkServerTrusted(X509Certificate[] chain, String authType) throws CertificateException {}
-
-    public X509Certificate[] getAcceptedIssuers() {
-        return new X509Certificate[0];
-    }
-}
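The three test files above swap a hand-rolled trust-all `X509TrustManager` for Netty's bundled `InsecureTrustManagerFactory`, which is why `TrustAllManager.java` can be deleted outright. A minimal sketch of the replacement pattern, test-only by design:

```java
import javax.net.ssl.SSLException;

import io.netty.handler.ssl.ClientAuth;
import io.netty.handler.ssl.SslContext;
import io.netty.handler.ssl.SslContextBuilder;
import io.netty.handler.ssl.util.InsecureTrustManagerFactory;

final class TestTlsContexts {
    private TestTlsContexts() {}

    // Equivalent of the old TrustAllManager wiring: certificate validation is
    // disabled entirely, so this must only ever back test fixtures, never
    // production code.
    static SslContext insecureClientContext() throws SSLException {
        return SslContextBuilder.forClient()
            .clientAuth(ClientAuth.NONE)
            .trustManager(InsecureTrustManagerFactory.INSTANCE)
            .build();
    }
}
```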
diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.11.0-snapshot-8a555eb.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.11.0-snapshot-8a555eb.jar.sha1
deleted file mode 100644
index 0643f16dc1052..0000000000000
--- a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.11.0-snapshot-8a555eb.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-44a4e095d7e047a9452d81b224905b72c830f8ae
\ No newline at end of file
diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.11.0-snapshot-fb97840.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.11.0-snapshot-fb97840.jar.sha1
new file mode 100644
index 0000000000000..594733c11402c
--- /dev/null
+++ b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.11.0-snapshot-fb97840.jar.sha1
@@ -0,0 +1 @@
+f9cd7bec33c8cf3b891976cb674ffe9c97f8747f
\ No newline at end of file
diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.11.0-snapshot-8a555eb.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.11.0-snapshot-8a555eb.jar.sha1
deleted file mode 100644
index 3f4d49a78791b..0000000000000
--- a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.11.0-snapshot-8a555eb.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-7f57fe7322e6d3a9b4edcc3da0b1ee0791a814ec
\ No newline at end of file
diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.11.0-snapshot-fb97840.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.11.0-snapshot-fb97840.jar.sha1
new file mode 100644
index 0000000000000..c46e06b8c87e4
--- /dev/null
+++ b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.11.0-snapshot-fb97840.jar.sha1
@@ -0,0 +1 @@
+c244a56bf7cd171a19379c96f1d20c477a34578d
\ No newline at end of file
diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.11.0-snapshot-8a555eb.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.11.0-snapshot-8a555eb.jar.sha1
deleted file mode 100644
index 1f110011ca9c6..0000000000000
--- a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.11.0-snapshot-8a555eb.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-9929da235100f8df323cfed165b8111fb2840093
\ No newline at end of file
diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.11.0-snapshot-fb97840.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.11.0-snapshot-fb97840.jar.sha1
new file mode 100644
index 0000000000000..a79c34a127920
--- /dev/null
+++ b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.11.0-snapshot-fb97840.jar.sha1
@@ -0,0 +1 @@
+da26df43f2b0d7c2dfecbf208cae0772a5e382ca
\ No newline at end of file
diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.11.0-snapshot-8a555eb.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.11.0-snapshot-8a555eb.jar.sha1
deleted file mode 100644
index 8d6bf9fa0fa1b..0000000000000
--- a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.11.0-snapshot-8a555eb.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-8564c86d880c6ce002250002e2fd0936cbfff61d
\ No newline at end of file
diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.11.0-snapshot-fb97840.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.11.0-snapshot-fb97840.jar.sha1
new file mode 100644
index 0000000000000..f2b08020be1ad
--- /dev/null
+++ b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.11.0-snapshot-fb97840.jar.sha1
@@ -0,0 +1 @@
+f752ffa5ee4697b04643214236138f3defdee2f4
\ No newline at end of file
diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.11.0-snapshot-8a555eb.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.11.0-snapshot-8a555eb.jar.sha1
deleted file mode 100644
index cbe4aec98fae4..0000000000000
--- a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.11.0-snapshot-8a555eb.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-7e71777cfb5beb4ffd5b03030576d2f062eef13c
\ No newline at end of file
diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.11.0-snapshot-fb97840.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.11.0-snapshot-fb97840.jar.sha1
new file mode 100644
index 0000000000000..969a05905eaf0
--- /dev/null
+++ b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.11.0-snapshot-fb97840.jar.sha1
@@ -0,0 +1 @@
+73fe44fe755aef72e7293b2ffdd934beb631429d
\ No newline at end of file
diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.11.0-snapshot-8a555eb.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.11.0-snapshot-8a555eb.jar.sha1
deleted file mode 100644
index e1c7aecc104d0..0000000000000
--- a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.11.0-snapshot-8a555eb.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-a597265bd6fb0a7e954e948a295d31507dd73cce
\ No newline at end of file
diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.11.0-snapshot-fb97840.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.11.0-snapshot-fb97840.jar.sha1
new file mode 100644
index 0000000000000..fdf0bd39e217e
--- /dev/null
+++ b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.11.0-snapshot-fb97840.jar.sha1
@@ -0,0 +1 @@
+c2b48831b25e1c7e8f683a63d1505c2d133256d3
\ No newline at end of file
diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.11.0-snapshot-8a555eb.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.11.0-snapshot-8a555eb.jar.sha1
deleted file mode 100644
index eefa2809f3540..0000000000000
--- a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.11.0-snapshot-8a555eb.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-c9e534845bb08985d7fa21e2e71a14bc68c46089
\ No newline at end of file
diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.11.0-snapshot-fb97840.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.11.0-snapshot-fb97840.jar.sha1
new file mode 100644
index 0000000000000..0042415700453
--- /dev/null
+++ b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.11.0-snapshot-fb97840.jar.sha1
@@ -0,0 +1 @@
+757f8b29f103f82b6fb6948634e93dd497c9d7a8
\ No newline at end of file
diff --git a/plugins/cache-ehcache/build.gradle b/plugins/cache-ehcache/build.gradle
index 65e7daaaacf26..4fc5e44f58c3a 100644
--- a/plugins/cache-ehcache/build.gradle
+++ b/plugins/cache-ehcache/build.gradle
@@ -95,3 +95,8 @@ test {
   // TODO: Adding permission in plugin-security.policy doesn't seem to work.
   systemProperty 'tests.security.manager', 'false'
 }
+
+internalClusterTest {
+  // TODO: Remove this later once we have a way.
+  systemProperty 'tests.security.manager', 'false'
+}
diff --git a/plugins/cache-ehcache/src/internalClusterTest/java/org/opensearch/cache/EhcacheDiskCacheIT.java b/plugins/cache-ehcache/src/internalClusterTest/java/org/opensearch/cache/EhcacheDiskCacheIT.java
index c68455463ee3d..909a493c0734f 100644
--- a/plugins/cache-ehcache/src/internalClusterTest/java/org/opensearch/cache/EhcacheDiskCacheIT.java
+++ b/plugins/cache-ehcache/src/internalClusterTest/java/org/opensearch/cache/EhcacheDiskCacheIT.java
@@ -8,22 +8,62 @@

 package org.opensearch.cache;

+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
+
 import org.opensearch.action.admin.cluster.node.info.NodeInfo;
 import org.opensearch.action.admin.cluster.node.info.NodesInfoRequest;
 import org.opensearch.action.admin.cluster.node.info.NodesInfoResponse;
 import org.opensearch.action.admin.cluster.node.info.PluginsAndModules;
+import org.opensearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest;
+import org.opensearch.action.admin.indices.cache.clear.ClearIndicesCacheResponse;
+import org.opensearch.action.admin.indices.forcemerge.ForceMergeResponse;
+import org.opensearch.action.search.SearchResponse;
+import org.opensearch.action.search.SearchType;
+import org.opensearch.cache.store.disk.EhcacheDiskCache;
+import org.opensearch.cache.store.disk.EhcacheThreadLeakFilter;
+import org.opensearch.client.Client;
+import org.opensearch.cluster.metadata.IndexMetadata;
+import org.opensearch.common.cache.CacheType;
+import org.opensearch.common.cache.settings.CacheSettings;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.unit.TimeValue;
+import org.opensearch.common.util.FeatureFlags;
+import org.opensearch.env.NodeEnvironment;
+import org.opensearch.index.cache.request.RequestCacheStats;
+import org.opensearch.index.query.QueryBuilders;
+import org.opensearch.indices.IndicesRequestCache;
 import org.opensearch.plugins.Plugin;
 import org.opensearch.plugins.PluginInfo;
+import org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval;
 import org.opensearch.test.OpenSearchIntegTestCase;
+import org.opensearch.test.hamcrest.OpenSearchAssertions;
 import org.junit.Assert;

+import java.io.IOException;
+import java.time.ZoneId;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.List;
+import java.util.UUID;
+import java.util.concurrent.TimeUnit;
 import java.util.function.Function;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;

+import static org.opensearch.cache.EhcacheDiskCacheSettings.DEFAULT_CACHE_SIZE_IN_BYTES;
+import static org.opensearch.cache.EhcacheDiskCacheSettings.DISK_CACHE_EXPIRE_AFTER_ACCESS_KEY;
+import static org.opensearch.cache.EhcacheDiskCacheSettings.DISK_LISTENER_MODE_SYNC_KEY;
+import static org.opensearch.cache.EhcacheDiskCacheSettings.DISK_MAX_SIZE_IN_BYTES_KEY;
+import static org.opensearch.cache.EhcacheDiskCacheSettings.DISK_STORAGE_PATH_KEY;
+import static org.opensearch.indices.IndicesService.INDICES_CACHE_CLEAN_INTERVAL_SETTING;
+import static org.opensearch.search.aggregations.AggregationBuilders.dateHistogram;
+import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
+import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures;
+import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.greaterThan;
+
+@OpenSearchIntegTestCase.ClusterScope(numDataNodes = 0, scope = OpenSearchIntegTestCase.Scope.TEST)
+@ThreadLeakFilters(filters = { EhcacheThreadLeakFilter.class })
 public class EhcacheDiskCacheIT extends OpenSearchIntegTestCase {

     @Override
@@ -31,7 +71,53 @@ protected Collection> nodePlugins() {
         return Arrays.asList(EhcacheCachePlugin.class);
     }

+    @Override
+    protected Settings featureFlagSettings() {
+        return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.PLUGGABLE_CACHE, "true").build();
+    }
+
+    private Settings defaultSettings(long sizeInBytes, TimeValue expirationTime) {
+        if (expirationTime == null) {
+            expirationTime = TimeValue.MAX_VALUE;
+        }
+        try (NodeEnvironment env = newNodeEnvironment(Settings.EMPTY)) {
+            return Settings.builder()
+                .put(
+                    EhcacheDiskCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE)
+                        .get(DISK_STORAGE_PATH_KEY)
+                        .getKey(),
+                    env.nodePaths()[0].indicesPath.toString() + "/" + UUID.randomUUID() + "/request_cache/"
+                )
+                .put(
+                    CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE).getKey(),
+                    EhcacheDiskCache.EhcacheDiskCacheFactory.EHCACHE_DISK_CACHE_NAME
+                )
+                .put(
+                    EhcacheDiskCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE)
+                        .get(DISK_LISTENER_MODE_SYNC_KEY)
+                        .getKey(),
+                    true
+                )
+                .put(
+                    EhcacheDiskCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE)
+                        .get(DISK_MAX_SIZE_IN_BYTES_KEY)
+                        .getKey(),
+                    sizeInBytes
+                )
+                .put(
+                    EhcacheDiskCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE)
+                        .get(DISK_CACHE_EXPIRE_AFTER_ACCESS_KEY)
+                        .getKey(),
+                    expirationTime
+                )
+                .build();
+        } catch (IOException e) {
+            throw new RuntimeException(e);
+        }
+    }
+
     public void testPluginsAreInstalled() {
+        internalCluster().startNode(Settings.builder().put(defaultSettings(DEFAULT_CACHE_SIZE_IN_BYTES, null)).build());
         NodesInfoRequest nodesInfoRequest = new NodesInfoRequest();
         nodesInfoRequest.addMetric(NodesInfoRequest.Metric.PLUGINS.metricName());
         NodesInfoResponse nodesInfoResponse = OpenSearchIntegTestCase.client().admin().cluster().nodesInfo(nodesInfoRequest).actionGet();
@@ -45,4 +131,258 @@
             pluginInfos.stream().anyMatch(pluginInfo -> pluginInfo.getName().equals("org.opensearch.cache.EhcacheCachePlugin"))
         );
     }
+
+    public void testSanityChecksWithIndicesRequestCache() throws InterruptedException {
+        internalCluster().startNode(Settings.builder().put(defaultSettings(DEFAULT_CACHE_SIZE_IN_BYTES, null)).build());
+        Client client = client();
+        assertAcked(
+            client.admin()
+                .indices()
+                .prepareCreate("index")
+                .setMapping("f", "type=date")
+                .setSettings(
+                    Settings.builder()
+                        .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
+                        .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
+                        .put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true)
+                        .build()
+                )
+                .get()
+        );
+        indexRandom(
+            true,
+            client.prepareIndex("index").setSource("f", "2014-03-10T00:00:00.000Z"),
+            client.prepareIndex("index").setSource("f", "2014-05-13T00:00:00.000Z")
+        );
+        ensureSearchable("index");
+
+        // This is not a random example: serialization with time zones writes shared strings
+        // which used to not work well with the query cache because of the handles stream output
+        // see #9500
+        final SearchResponse r1 = client.prepareSearch("index")
+            .setSize(0)
+            .setSearchType(SearchType.QUERY_THEN_FETCH)
+            .addAggregation(
+                dateHistogram("histo").field("f")
+                    .timeZone(ZoneId.of("+01:00"))
+                    .minDocCount(0)
+                    .dateHistogramInterval(DateHistogramInterval.MONTH)
+            )
+            .get();
+        assertSearchResponse(r1);
+
+        // The cache is actually used
+        assertThat(
+            client.admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMemorySizeInBytes(),
+            greaterThan(0L)
+        );
+    }
+
+    public void testInvalidationWithIndicesRequestCache() throws Exception {
+        internalCluster().startNode(
+            Settings.builder()
+                .put(defaultSettings(DEFAULT_CACHE_SIZE_IN_BYTES, null))
+                .put(INDICES_CACHE_CLEAN_INTERVAL_SETTING.getKey(), new TimeValue(1))
+                .build()
+        );
+        Client client = client();
+        assertAcked(
+            client.admin()
+                .indices()
+                .prepareCreate("index")
+                .setMapping("k", "type=keyword")
+                .setSettings(
+                    Settings.builder()
+                        .put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true)
+                        .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
+                        .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
+                        .put("index.refresh_interval", -1)
+                )
+                .get()
+        );
+        int numberOfIndexedItems = randomIntBetween(5, 10);
+        for (int iterator = 0; iterator < numberOfIndexedItems; iterator++) {
+            indexRandom(true, client.prepareIndex("index").setSource("k" + iterator, "hello" + iterator));
+        }
+        ensureSearchable("index");
+        refreshAndWaitForReplication();
+        // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache
+        ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge("index").setFlush(true).get();
+        OpenSearchAssertions.assertAllSuccessful(forceMergeResponse);
+        long perQuerySizeInCacheInBytes = -1;
+        for (int iterator = 0; iterator < numberOfIndexedItems; iterator++) {
+            SearchResponse resp = client.prepareSearch("index")
+                .setRequestCache(true)
+                .setQuery(QueryBuilders.termQuery("k" + iterator, "hello" + iterator))
+                .get();
+            if (perQuerySizeInCacheInBytes == -1) {
+                RequestCacheStats requestCacheStats = getRequestCacheStats(client, "index");
+                perQuerySizeInCacheInBytes = requestCacheStats.getMemorySizeInBytes();
+            }
+            assertSearchResponse(resp);
+        }
+        RequestCacheStats requestCacheStats = getRequestCacheStats(client, "index");
+        assertEquals(numberOfIndexedItems, requestCacheStats.getMissCount());
+        assertEquals(0, requestCacheStats.getHitCount());
+        assertEquals(0, requestCacheStats.getEvictions());
+        assertEquals(perQuerySizeInCacheInBytes * numberOfIndexedItems, requestCacheStats.getMemorySizeInBytes());
+        for (int iterator = 0; iterator < numberOfIndexedItems; iterator++) {
+            SearchResponse resp = client.prepareSearch("index")
+                .setRequestCache(true)
+                .setQuery(QueryBuilders.termQuery("k" + iterator, "hello" + iterator))
+                .get();
+            assertSearchResponse(resp);
+        }
+        requestCacheStats = getRequestCacheStats(client, "index");
+        assertEquals(numberOfIndexedItems, requestCacheStats.getHitCount());
+        assertEquals(numberOfIndexedItems, requestCacheStats.getMissCount());
+        assertEquals(perQuerySizeInCacheInBytes * numberOfIndexedItems, requestCacheStats.getMemorySizeInBytes());
+        assertEquals(0, requestCacheStats.getEvictions());
+        // Explicit refresh would invalidate cache entries.
+        refreshAndWaitForReplication();
+        assertBusy(() -> {
+            // Explicit refresh should clear up cache entries
+            assertTrue(getRequestCacheStats(client, "index").getMemorySizeInBytes() == 0);
+        }, 1, TimeUnit.SECONDS);
+        requestCacheStats = getRequestCacheStats(client, "index");
+        assertEquals(0, requestCacheStats.getMemorySizeInBytes());
+        // Hits and misses stats shouldn't get cleared up.
+        assertEquals(numberOfIndexedItems, requestCacheStats.getHitCount());
+        assertEquals(numberOfIndexedItems, requestCacheStats.getMissCount());
+    }
+
+    public void testExplicitCacheClearWithIndicesRequestCache() throws Exception {
+        internalCluster().startNode(
+            Settings.builder()
+                .put(defaultSettings(DEFAULT_CACHE_SIZE_IN_BYTES, null))
+                .put(INDICES_CACHE_CLEAN_INTERVAL_SETTING.getKey(), new TimeValue(1))
+                .build()
+        );
+        Client client = client();
+        assertAcked(
+            client.admin()
+                .indices()
+                .prepareCreate("index")
+                .setMapping("k", "type=keyword")
+                .setSettings(
+                    Settings.builder()
+                        .put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true)
+                        .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
+                        .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
+                        .put("index.refresh_interval", -1)
+                )
+                .get()
+        );
+        int numberOfIndexedItems = randomIntBetween(5, 10);
+        for (int iterator = 0; iterator < numberOfIndexedItems; iterator++) {
+            indexRandom(true, client.prepareIndex("index").setSource("k" + iterator, "hello" + iterator));
+        }
+        ensureSearchable("index");
+        refreshAndWaitForReplication();
+        // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache
+        ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge("index").setFlush(true).get();
+        OpenSearchAssertions.assertAllSuccessful(forceMergeResponse);
+
+        long perQuerySizeInCacheInBytes = -1;
+        for (int iterator = 0; iterator < numberOfIndexedItems; iterator++) {
+            SearchResponse resp = client.prepareSearch("index")
+                .setRequestCache(true)
+                .setQuery(QueryBuilders.termQuery("k" + iterator, "hello" + iterator))
+                .get();
+            if (perQuerySizeInCacheInBytes == -1) {
+                RequestCacheStats requestCacheStats = getRequestCacheStats(client, "index");
+                perQuerySizeInCacheInBytes = requestCacheStats.getMemorySizeInBytes();
+            }
+            assertSearchResponse(resp);
+        }
+        RequestCacheStats requestCacheStats = getRequestCacheStats(client, "index");
+        assertEquals(numberOfIndexedItems, requestCacheStats.getMissCount());
+        assertEquals(0, requestCacheStats.getHitCount());
+        assertEquals(0, requestCacheStats.getEvictions());
+        assertEquals(perQuerySizeInCacheInBytes * numberOfIndexedItems, requestCacheStats.getMemorySizeInBytes());
+
+        // Explicit clear the cache.
+        ClearIndicesCacheRequest request = new ClearIndicesCacheRequest("index");
+        ClearIndicesCacheResponse response = client.admin().indices().clearCache(request).get();
+        assertNoFailures(response);
+
+        assertBusy(() -> {
+            // All entries should get cleared up.
+            assertTrue(getRequestCacheStats(client, "index").getMemorySizeInBytes() == 0);
+        }, 1, TimeUnit.SECONDS);
+    }
+
+    public void testEvictionsFlowWithExpirationTime() throws Exception {
+        internalCluster().startNode(
+            Settings.builder()
+                .put(defaultSettings(DEFAULT_CACHE_SIZE_IN_BYTES, new TimeValue(0))) // Immediately evict items after
+                                                                                     // access
+                .put(INDICES_CACHE_CLEAN_INTERVAL_SETTING.getKey(), new TimeValue(1))
+                .build()
+        );
+        Client client = client();
+        assertAcked(
+            client.admin()
+                .indices()
+                .prepareCreate("index")
+                .setMapping("k", "type=keyword")
+                .setSettings(
+                    Settings.builder()
+                        .put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true)
+                        .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
+                        .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
+                        .put("index.refresh_interval", -1)
+                )
+                .get()
+        );
+        int numberOfIndexedItems = 2;// randomIntBetween(5, 10);
+        for (int iterator = 0; iterator < numberOfIndexedItems; iterator++) {
+            indexRandom(true, client.prepareIndex("index").setSource("k" + iterator, "hello" + iterator));
+        }
+        ensureSearchable("index");
+        refreshAndWaitForReplication();
+        // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache
+        ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge("index").setFlush(true).get();
+        OpenSearchAssertions.assertAllSuccessful(forceMergeResponse);
+
+        long perQuerySizeInCacheInBytes = -1;
+        for (int iterator = 0; iterator < numberOfIndexedItems; iterator++) {
+            SearchResponse resp = client.prepareSearch("index")
+                .setRequestCache(true)
+                .setQuery(QueryBuilders.termQuery("k" + iterator, "hello" + iterator))
+                .get();
+            if (perQuerySizeInCacheInBytes == -1) {
+                RequestCacheStats requestCacheStats = getRequestCacheStats(client, "index");
+                perQuerySizeInCacheInBytes = requestCacheStats.getMemorySizeInBytes();
+            }
+            assertSearchResponse(resp);
+        }
+        RequestCacheStats requestCacheStats = getRequestCacheStats(client, "index");
+        assertEquals(0, requestCacheStats.getHitCount());
+        assertEquals(numberOfIndexedItems, requestCacheStats.getMissCount());
+        assertEquals(perQuerySizeInCacheInBytes * numberOfIndexedItems, requestCacheStats.getMemorySizeInBytes());
+        assertEquals(0, requestCacheStats.getEvictions());
+
+        for (int iterator = 0; iterator < numberOfIndexedItems; iterator++) {
+            SearchResponse resp = client.prepareSearch("index")
+                .setRequestCache(true)
+                .setQuery(QueryBuilders.termQuery("k" + iterator, "hello" + iterator))
+                .get();
+            assertSearchResponse(resp);
+        }
+        requestCacheStats = getRequestCacheStats(client, "index");
+        // Now that we have accessed the entries, they should expire after 1ms. So let's wait and verify that the cache
+        // gets cleared up.
+        assertBusy(() -> {
+            // Explicit refresh should clear up cache entries
+            assertTrue(getRequestCacheStats(client, "index").getMemorySizeInBytes() == 0);
+        }, 10, TimeUnit.MILLISECONDS);
+        // Validate hit and miss count.
+        assertEquals(numberOfIndexedItems, requestCacheStats.getHitCount());
+        assertEquals(numberOfIndexedItems, requestCacheStats.getMissCount());
+    }
+
+    private RequestCacheStats getRequestCacheStats(Client client, String indexName) {
+        return client.admin().indices().prepareStats(indexName).setRequestCache(true).get().getTotal().getRequestCache();
+    }
 }
diff --git a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/EhcacheDiskCacheSettings.java b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/EhcacheDiskCacheSettings.java
index 837fd6b268ce6..d173155080f6a 100644
--- a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/EhcacheDiskCacheSettings.java
+++ b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/EhcacheDiskCacheSettings.java
@@ -23,6 +23,11 @@
  */
 public class EhcacheDiskCacheSettings {

+    /**
+     * Default cache size in bytes, i.e., 1GB.
+     */
+    public static final long DEFAULT_CACHE_SIZE_IN_BYTES = 1073741824L;
+
     /**
      * Ehcache disk write minimum threads for its pool
      *
@@ -99,7 +104,7 @@ public class EhcacheDiskCacheSettings {
      */
    public static final Setting.AffixSetting<Long> DISK_CACHE_MAX_SIZE_IN_BYTES_SETTING = Setting.suffixKeySetting(
        EhcacheDiskCache.EhcacheDiskCacheFactory.EHCACHE_DISK_CACHE_NAME + ".max_size_in_bytes",
-        (key) -> Setting.longSetting(key, 1073741824L, NodeScope)
+        (key) -> Setting.longSetting(key, DEFAULT_CACHE_SIZE_IN_BYTES, NodeScope)
    );
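The settings change above just lifts the magic number into a named constant so the integration test's `defaultSettings(...)` and the setting's fallback cannot drift apart. A sketch of the pattern, with a hypothetical flat setting key (the real one is an affix setting derived per cache type):

```java
import org.opensearch.common.settings.Setting;

import static org.opensearch.common.settings.Setting.Property.NodeScope;

final class CacheSizeDefaultSketch {
    private CacheSizeDefaultSketch() {}

    // One shared constant instead of a repeated literal: tests pass the same
    // default that the setting definition falls back to.
    static final long DEFAULT_CACHE_SIZE_IN_BYTES = 1073741824L; // 1 GiB

    // Hypothetical key for illustration only.
    static final Setting<Long> MAX_SIZE_IN_BYTES = Setting.longSetting(
        "example.ehcache_disk.max_size_in_bytes",
        DEFAULT_CACHE_SIZE_IN_BYTES,
        NodeScope
    );
}
```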
diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransport.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransport.java
index d4a5a9ad83af6..bd1646d753016 100644
--- a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransport.java
+++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransport.java
@@ -8,6 +8,7 @@

 package org.opensearch.http.reactor.netty4;

+import org.opensearch.common.Nullable;
 import org.opensearch.common.network.NetworkService;
 import org.opensearch.common.settings.ClusterSettings;
 import org.opensearch.common.settings.Setting;
@@ -23,21 +24,33 @@
 import org.opensearch.http.HttpChannel;
 import org.opensearch.http.HttpReadTimeoutException;
 import org.opensearch.http.HttpServerChannel;
+import org.opensearch.http.reactor.netty4.ssl.SslUtils;
+import org.opensearch.plugins.SecureHttpTransportSettingsProvider;
 import org.opensearch.telemetry.tracing.Tracer;
 import org.opensearch.threadpool.ThreadPool;
 import org.opensearch.transport.reactor.SharedGroupFactory;
 import org.opensearch.transport.reactor.netty4.Netty4Utils;

+import javax.net.ssl.SSLEngine;
+import javax.net.ssl.SSLException;
+import javax.net.ssl.SSLSessionContext;
+
 import java.net.InetSocketAddress;
 import java.net.SocketOption;
 import java.time.Duration;
+import java.util.Arrays;
+import java.util.List;

+import io.netty.buffer.ByteBufAllocator;
 import io.netty.channel.ChannelOption;
 import io.netty.channel.socket.nio.NioChannelOption;
 import io.netty.handler.codec.http.DefaultLastHttpContent;
 import io.netty.handler.codec.http.FullHttpResponse;
 import io.netty.handler.codec.http.HttpContent;
+import io.netty.handler.ssl.ApplicationProtocolNegotiator;
+import io.netty.handler.ssl.SslContext;
 import io.netty.handler.timeout.ReadTimeoutException;
+import io.netty.util.ReferenceCountUtil;
 import org.reactivestreams.Publisher;
 import reactor.core.publisher.Mono;
 import reactor.core.scheduler.Scheduler;
@@ -116,6 +129,7 @@ public class ReactorNetty4HttpServerTransport extends AbstractHttpServerTranspor
     private final ByteSizeValue maxInitialLineLength;
     private final ByteSizeValue maxHeaderSize;
     private final ByteSizeValue maxChunkSize;
+    private final SecureHttpTransportSettingsProvider secureHttpTransportSettingsProvider;
     private volatile SharedGroupFactory.SharedGroup sharedGroup;
     private volatile DisposableServer disposableServer;
     private volatile Scheduler scheduler;
@@ -142,6 +156,45 @@ public ReactorNetty4HttpServerTransport(
         ClusterSettings clusterSettings,
         SharedGroupFactory sharedGroupFactory,
         Tracer tracer
+    ) {
+        this(
+            settings,
+            networkService,
+            bigArrays,
+            threadPool,
+            xContentRegistry,
+            dispatcher,
+            clusterSettings,
+            sharedGroupFactory,
+            null,
+            tracer
+        );
+    }
+
+    /**
+     * Creates new HTTP transport implementations based on Reactor Netty (please see {@link HttpServer}).
+     * @param settings settings
+     * @param networkService network service
+     * @param bigArrays big array allocator
+     * @param threadPool thread pool instance
+     * @param xContentRegistry XContent registry instance
+     * @param dispatcher dispatcher instance
+     * @param clusterSettings cluster settings
+     * @param sharedGroupFactory shared group factory
+     * @param secureHttpTransportSettingsProvider secure HTTP transport settings provider
+     * @param tracer tracer instance
+     */
+    public ReactorNetty4HttpServerTransport(
+        Settings settings,
+        NetworkService networkService,
+        BigArrays bigArrays,
+        ThreadPool threadPool,
+        NamedXContentRegistry xContentRegistry,
+        Dispatcher dispatcher,
+        ClusterSettings clusterSettings,
+        SharedGroupFactory sharedGroupFactory,
+        @Nullable SecureHttpTransportSettingsProvider secureHttpTransportSettingsProvider,
+        Tracer tracer
     ) {
         super(settings, networkService, bigArrays, threadPool, xContentRegistry, dispatcher, clusterSettings, tracer);
         Netty4Utils.setAvailableProcessors(OpenSearchExecutors.NODE_PROCESSORS_SETTING.get(settings));
@@ -152,6 +205,7 @@ public ReactorNetty4HttpServerTransport(
         this.maxChunkSize = SETTING_HTTP_MAX_CHUNK_SIZE.get(settings);
         this.maxHeaderSize = SETTING_HTTP_MAX_HEADER_SIZE.get(settings);
         this.maxInitialLineLength = SETTING_HTTP_MAX_INITIAL_LINE_LENGTH.get(settings);
+        this.secureHttpTransportSettingsProvider = secureHttpTransportSettingsProvider;
     }

     /**
@@ -160,7 +214,7 @@ public ReactorNetty4HttpServerTransport(
      */
     @Override
     protected HttpServerChannel bind(InetSocketAddress socketAddress) throws Exception {
-        final HttpServer server = configureChannelOptions(
+        final HttpServer server = configure(
             HttpServer.create()
                 .httpFormDecoder(builder -> builder.scheduler(scheduler))
                 .idleTimeout(Duration.ofMillis(connectTimeoutMillis))
@@ -173,7 +227,6 @@ protected HttpServerChannel bind(InetSocketAddress socketAddress) throws Excepti
                         .maxHeaderSize(maxHeaderSize.bytesAsInt())
                         .maxInitialLineLength(maxInitialLineLength.bytesAsInt())
                 )
-                .protocol(HttpProtocol.HTTP11, HttpProtocol.H2C)
                 .handle((req, res) -> incomingRequest(req, res))
         );

@@ -181,8 +234,8 @@ protected HttpServerChannel bind(InetSocketAddress socketAddress) throws Excepti
         return new ReactorNetty4HttpServerChannel(disposableServer.channel());
     }

-    private HttpServer configureChannelOptions(final HttpServer server1) {
-        HttpServer configured = server1.childOption(ChannelOption.TCP_NODELAY, SETTING_HTTP_TCP_NO_DELAY.get(settings))
+    private HttpServer configure(final HttpServer server) throws Exception {
+        HttpServer configured = server.childOption(ChannelOption.TCP_NODELAY, SETTING_HTTP_TCP_NO_DELAY.get(settings))
             .childOption(ChannelOption.SO_KEEPALIVE, SETTING_HTTP_TCP_KEEP_ALIVE.get(settings));

         if (SETTING_HTTP_TCP_KEEP_ALIVE.get(settings)) {
@@ -229,6 +282,65 @@ private HttpServer configureChannelOptions(final HttpServer server1) {
         configured = configured.option(ChannelOption.SO_REUSEADDR, reuseAddress);
         configured = configured.childOption(ChannelOption.SO_REUSEADDR, reuseAddress);

+        // Configure SSL context if available
+        if (secureHttpTransportSettingsProvider != null) {
+            final SSLEngine engine = secureHttpTransportSettingsProvider.buildSecureHttpServerEngine(settings, this)
+                .orElseGet(SslUtils::createDefaultServerSSLEngine);
+
+            try {
+                final List<String> cipherSuites = Arrays.asList(engine.getEnabledCipherSuites());
+                final List<String> applicationProtocols = Arrays.asList(engine.getSSLParameters().getApplicationProtocols());
+
+                configured = configured.secure(spec -> spec.sslContext(new SslContext() {
+                    @Override
+                    public SSLSessionContext sessionContext() {
+                        throw new UnsupportedOperationException(); /* server only, should never be called */
+                    }
+
+                    @Override
+                    public SSLEngine newEngine(ByteBufAllocator alloc, String peerHost, int peerPort) {
+                        throw new UnsupportedOperationException(); /* server only, should never be called */
+                    }
+
+                    @Override
+                    public SSLEngine newEngine(ByteBufAllocator alloc) {
+                        try {
+                            return secureHttpTransportSettingsProvider.buildSecureHttpServerEngine(
+                                settings,
+                                ReactorNetty4HttpServerTransport.this
+                            ).orElseGet(SslUtils::createDefaultServerSSLEngine);
+                        } catch (final SSLException ex) {
+                            throw new UnsupportedOperationException("Unable to create SSLEngine", ex);
+                        }
+                    }
+
+                    @Override
+                    public boolean isClient() {
+                        return false; /* server only */
+                    }
+
+                    @Override
+                    public List<String> cipherSuites() {
+                        return cipherSuites;
+                    }
+
+                    @Override
+                    public ApplicationProtocolNegotiator applicationProtocolNegotiator() {
+                        return new ApplicationProtocolNegotiator() {
+                            @Override
+                            public List<String> protocols() {
+                                return applicationProtocols;
+                            }
+                        };
+                    }
+                }).build()).protocol(HttpProtocol.HTTP11, HttpProtocol.H2);
+            } finally {
+                ReferenceCountUtil.release(engine);
+            }
+        } else {
+            configured = configured.protocol(HttpProtocol.HTTP11, HttpProtocol.H2C);
+        }
+
         return configured;
     }

@@ -302,6 +414,11 @@ protected void doStart() {
         }
     }

+    /**
+     * Exception handler
+     * @param channel HTTP channel
+     * @param cause exception occurred
+     */
     @Override
     public void onException(HttpChannel channel, Exception cause) {
         if (cause instanceof ReadTimeoutException) {
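The branch added to `configure(...)` is also where the advertised protocols split. A sketch of that decision, assuming a prepared `SslContext` stands in for the provider plumbing above (names hypothetical):

```java
import io.netty.handler.ssl.SslContext;
import reactor.netty.http.HttpProtocol;
import reactor.netty.http.server.HttpServer;

final class ProtocolSelectionSketch {
    private ProtocolSelectionSketch() {}

    // Over TLS, HTTP/2 is negotiated via ALPN, so the server offers HTTP11 + H2;
    // without TLS, only the cleartext upgrade variant H2C is possible.
    static HttpServer withProtocols(HttpServer server, SslContext sslContext) {
        return sslContext != null
            ? server.secure(spec -> spec.sslContext(sslContext)).protocol(HttpProtocol.HTTP11, HttpProtocol.H2)
            : server.protocol(HttpProtocol.HTTP11, HttpProtocol.H2C);
    }
}
```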
diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ssl/SslUtils.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ssl/SslUtils.java
new file mode 100644
index 0000000000000..a0b4319c7c2d1
--- /dev/null
+++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ssl/SslUtils.java
@@ -0,0 +1,42 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ *
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+package org.opensearch.http.reactor.netty4.ssl;
+
+import org.opensearch.OpenSearchSecurityException;
+
+import javax.net.ssl.SSLContext;
+import javax.net.ssl.SSLEngine;
+
+import java.security.NoSuchAlgorithmException;
+
+/**
+ * Helper class for creating default SSL engines
+ */
+public class SslUtils {
+    private static final String[] DEFAULT_SSL_PROTOCOLS = { "TLSv1.3", "TLSv1.2", "TLSv1.1" };
+
+    private SslUtils() {}
+
+    /**
+     * Creates default server {@link SSLEngine} instance
+     * @return default server {@link SSLEngine} instance
+     */
+    public static SSLEngine createDefaultServerSSLEngine() {
+        try {
+            final SSLEngine engine = SSLContext.getDefault().createSSLEngine();
+            engine.setEnabledProtocols(DEFAULT_SSL_PROTOCOLS);
+            engine.setUseClientMode(false);
+            return engine;
+        } catch (final NoSuchAlgorithmException ex) {
+            throw new OpenSearchSecurityException("Unable to initialize default server SSL engine", ex);
+        }
+    }
+}
diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ssl/package-info.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ssl/package-info.java
new file mode 100644
index 0000000000000..95dbd2d6bd9ca
--- /dev/null
+++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ssl/package-info.java
@@ -0,0 +1,12 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/**
+ * SSL supporting utility classes
+ */
+package org.opensearch.http.reactor.netty4.ssl;
diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/ReactorNetty4Plugin.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/ReactorNetty4Plugin.java
index dc310c3793109..6e5b0215b58a4 100644
--- a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/ReactorNetty4Plugin.java
+++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/ReactorNetty4Plugin.java
@@ -17,9 +17,11 @@
 import org.opensearch.core.indices.breaker.CircuitBreakerService;
 import org.opensearch.core.xcontent.NamedXContentRegistry;
 import org.opensearch.http.HttpServerTransport;
+import org.opensearch.http.HttpServerTransport.Dispatcher;
 import org.opensearch.http.reactor.netty4.ReactorNetty4HttpServerTransport;
 import org.opensearch.plugins.NetworkPlugin;
 import org.opensearch.plugins.Plugin;
+import org.opensearch.plugins.SecureHttpTransportSettingsProvider;
 import org.opensearch.telemetry.tracing.Tracer;
 import org.opensearch.threadpool.ThreadPool;

@@ -38,6 +40,11 @@ public class ReactorNetty4Plugin extends Plugin implements NetworkPlugin {
      */
     public static final String REACTOR_NETTY_HTTP_TRANSPORT_NAME = "reactor-netty4";

+    /**
+     * The name of new experimental secure HTTP transport implementations based on Reactor Netty.
+     */
+    public static final String REACTOR_NETTY_SECURE_HTTP_TRANSPORT_NAME = "reactor-netty4-secure";
+
     private final SetOnce<SharedGroupFactory> groupFactory = new SetOnce<>();

     /**
@@ -91,6 +98,53 @@ public Map<String, Supplier<HttpServerTransport>> getHttpTransports(
                 dispatcher,
                 clusterSettings,
                 getSharedGroupFactory(settings),
+                null, /* no security settings provider */
                 tracer
             )
         );
     }
+
+    /**
+     * Returns a map of {@link HttpServerTransport} suppliers.
+     * See {@link org.opensearch.common.network.NetworkModule#HTTP_TYPE_SETTING} to configure a specific implementation.
+     * @param settings settings
+     * @param networkService network service
+     * @param bigArrays big array allocator
+     * @param pageCacheRecycler page cache recycler instance
+     * @param circuitBreakerService circuit breaker service instance
+     * @param threadPool thread pool instance
+     * @param xContentRegistry XContent registry instance
+     * @param dispatcher dispatcher instance
+     * @param clusterSettings cluster settings
+     * @param secureHttpTransportSettingsProvider secure HTTP transport settings provider
+     * @param tracer tracer instance
+     */
+    @Override
+    public Map<String, Supplier<HttpServerTransport>> getSecureHttpTransports(
+        Settings settings,
+        ThreadPool threadPool,
+        BigArrays bigArrays,
+        PageCacheRecycler pageCacheRecycler,
+        CircuitBreakerService circuitBreakerService,
+        NamedXContentRegistry xContentRegistry,
+        NetworkService networkService,
+        Dispatcher dispatcher,
+        ClusterSettings clusterSettings,
+        SecureHttpTransportSettingsProvider secureHttpTransportSettingsProvider,
+        Tracer tracer
+    ) {
+        return Collections.singletonMap(
+            REACTOR_NETTY_SECURE_HTTP_TRANSPORT_NAME,
+            () -> new ReactorNetty4HttpServerTransport(
+                settings,
+                networkService,
+                bigArrays,
+                threadPool,
+                xContentRegistry,
+                dispatcher,
+                clusterSettings,
+                getSharedGroupFactory(settings),
+                secureHttpTransportSettingsProvider,
                 tracer
             )
         );
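Since the secure transport registers under its own name, enabling it is a configuration concern rather than a code change. A hedged sketch of node settings that would select it, assuming the standard `http.type` key read by `NetworkModule#HTTP_TYPE_SETTING`:

```java
import org.opensearch.common.network.NetworkModule;
import org.opensearch.common.settings.Settings;
import org.opensearch.transport.reactor.ReactorNetty4Plugin;

final class SecureTransportSelectionSketch {
    private SecureTransportSelectionSketch() {}

    // Selects the experimental secure transport the same way any HTTP transport
    // is selected: by name, via the http.type setting.
    static Settings nodeSettings() {
        return Settings.builder()
            .put(NetworkModule.HTTP_TYPE_KEY, ReactorNetty4Plugin.REACTOR_NETTY_SECURE_HTTP_TRANSPORT_NAME)
            .build();
    }
}
```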
*/ -class ReactorHttpClient implements Closeable { +public class ReactorHttpClient implements Closeable { private final boolean compression; + private final boolean secure; static Collection returnHttpResponseBodies(Collection responses) { List list = new ArrayList<>(responses.size()); @@ -69,16 +75,21 @@ static Collection returnOpaqueIds(Collection responses return list; } - ReactorHttpClient(boolean compression) { + public ReactorHttpClient(boolean compression, boolean secure) { this.compression = compression; + this.secure = secure; } - static ReactorHttpClient create() { + public static ReactorHttpClient create() { return create(true); } - static ReactorHttpClient create(boolean compression) { - return new ReactorHttpClient(compression); + public static ReactorHttpClient create(boolean compression) { + return new ReactorHttpClient(compression, false); + } + + public static ReactorHttpClient https() { + return new ReactorHttpClient(true, true); } public List get(InetSocketAddress remoteAddress, String... uris) throws InterruptedException { @@ -92,7 +103,7 @@ public List get(InetSocketAddress remoteAddress, boolean order final FullHttpRequest httpRequest = new DefaultFullHttpRequest(HTTP_1_1, HttpMethod.GET, uris[i]); httpRequest.headers().add(HOST, "localhost"); httpRequest.headers().add("X-Opaque-ID", String.valueOf(i)); - httpRequest.headers().add(HttpConversionUtil.ExtensionHeaderNames.SCHEME.text(), "http"); + httpRequest.headers().add(HttpConversionUtil.ExtensionHeaderNames.SCHEME.text(), secure ? "https" : "http"); requests.add(httpRequest); } @@ -148,7 +159,7 @@ private List processRequestsWithBody( request.headers().add(HttpHeaderNames.HOST, "localhost"); request.headers().add(HttpHeaderNames.CONTENT_LENGTH, content.readableBytes()); request.headers().add(HttpHeaderNames.CONTENT_TYPE, "application/json"); - request.headers().add(HttpConversionUtil.ExtensionHeaderNames.SCHEME.text(), "http"); + request.headers().add(HttpConversionUtil.ExtensionHeaderNames.SCHEME.text(), secure ? "https" : "http"); request.headers().add("X-Opaque-ID", String.valueOf(i)); requests.add(request); } @@ -162,12 +173,7 @@ private List sendRequests( ) { final NioEventLoopGroup eventLoopGroup = new NioEventLoopGroup(1); try { - final HttpClient client = HttpClient.newConnection() - .resolver(DefaultAddressResolverGroup.INSTANCE) - .runOn(eventLoopGroup) - .host(remoteAddress.getHostString()) - .port(remoteAddress.getPort()) - .compress(compression); + final HttpClient client = createClient(remoteAddress, eventLoopGroup); @SuppressWarnings("unchecked") final Mono[] monos = requests.stream() @@ -201,6 +207,29 @@ private List sendRequests( } } + private HttpClient createClient(final InetSocketAddress remoteAddress, final NioEventLoopGroup eventLoopGroup) { + final HttpClient client = HttpClient.newConnection() + .resolver(DefaultAddressResolverGroup.INSTANCE) + .runOn(eventLoopGroup) + .host(remoteAddress.getHostString()) + .port(remoteAddress.getPort()) + .compress(compression); + + if (secure) { + return client.secure( + spec -> spec.sslContext( + OpenSearchTestCase.randomBoolean() + /* switch between HTTP 1.1/HTTP 2 randomly, both are supported */ ? 
Http11SslContextSpec.forClient() + .configure(s -> s.clientAuth(ClientAuth.NONE).trustManager(InsecureTrustManagerFactory.INSTANCE)) + : Http2SslContextSpec.forClient() + .configure(s -> s.clientAuth(ClientAuth.NONE).trustManager(InsecureTrustManagerFactory.INSTANCE)) + ) + ); + } + + return client; + } + @Override public void close() { diff --git a/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ssl/SecureReactorNetty4HttpServerTransportTests.java b/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ssl/SecureReactorNetty4HttpServerTransportTests.java new file mode 100644 index 0000000000000..ac7687d551766 --- /dev/null +++ b/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ssl/SecureReactorNetty4HttpServerTransportTests.java @@ -0,0 +1,595 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.http.reactor.netty4.ssl; + +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.common.network.NetworkAddress; +import org.opensearch.common.network.NetworkService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.MockBigArrays; +import org.opensearch.common.util.MockPageCacheRecycler; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.http.BindHttpException; +import org.opensearch.http.CorsHandler; +import org.opensearch.http.HttpServerTransport; +import org.opensearch.http.HttpTransportSettings; +import org.opensearch.http.NullDispatcher; +import org.opensearch.http.reactor.netty4.ReactorHttpClient; +import org.opensearch.http.reactor.netty4.ReactorNetty4HttpServerTransport; +import org.opensearch.plugins.SecureHttpTransportSettingsProvider; +import org.opensearch.plugins.TransportExceptionHandler; +import org.opensearch.rest.BytesRestResponse; +import org.opensearch.rest.RestChannel; +import org.opensearch.rest.RestRequest; +import org.opensearch.telemetry.tracing.noop.NoopTracer; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.rest.FakeRestRequest; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.NettyAllocator; +import org.opensearch.transport.reactor.SharedGroupFactory; +import org.junit.After; +import org.junit.Before; + +import javax.net.ssl.SSLEngine; +import javax.net.ssl.SSLException; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.Collections; +import java.util.Optional; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import io.netty.bootstrap.Bootstrap; +import io.netty.buffer.ByteBufAllocator; +import io.netty.buffer.ByteBufUtil; +import io.netty.buffer.PoolArenaMetric; +import io.netty.buffer.PooledByteBufAllocator; +import io.netty.buffer.PooledByteBufAllocatorMetric; +import io.netty.buffer.Unpooled; +import 
io.netty.channel.ChannelFuture; +import io.netty.channel.ChannelHandlerAdapter; +import io.netty.channel.ChannelInitializer; +import io.netty.channel.ChannelOption; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.SocketChannel; +import io.netty.channel.socket.nio.NioSocketChannel; +import io.netty.handler.codec.http.DefaultFullHttpRequest; +import io.netty.handler.codec.http.DefaultHttpContent; +import io.netty.handler.codec.http.FullHttpRequest; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpContent; +import io.netty.handler.codec.http.HttpHeaderNames; +import io.netty.handler.codec.http.HttpHeaderValues; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpResponseStatus; +import io.netty.handler.codec.http.HttpUtil; +import io.netty.handler.codec.http.HttpVersion; +import io.netty.handler.ssl.SslContextBuilder; +import io.netty.handler.ssl.util.InsecureTrustManagerFactory; + +import static org.opensearch.core.rest.RestStatus.OK; +import static org.opensearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN; +import static org.opensearch.http.HttpTransportSettings.SETTING_CORS_ENABLED; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; + +/** + * Tests for the secure {@link ReactorNetty4HttpServerTransport} class. + */ +public class SecureReactorNetty4HttpServerTransportTests extends OpenSearchTestCase { + + private NetworkService networkService; + private ThreadPool threadPool; + private MockBigArrays bigArrays; + private ClusterSettings clusterSettings; + private SecureHttpTransportSettingsProvider secureHttpTransportSettingsProvider; + + @Before + public void setup() throws Exception { + networkService = new NetworkService(Collections.emptyList()); + threadPool = new TestThreadPool("test"); + bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); + clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + + secureHttpTransportSettingsProvider = new SecureHttpTransportSettingsProvider() { + @Override + public Optional buildHttpServerExceptionHandler(Settings settings, HttpServerTransport transport) { + return Optional.empty(); + } + + @Override + public Optional buildSecureHttpServerEngine(Settings settings, HttpServerTransport transport) throws SSLException { + try { + SSLEngine engine = SslContextBuilder.forServer( + SecureReactorNetty4HttpServerTransportTests.class.getResourceAsStream("/certificate.crt"), + SecureReactorNetty4HttpServerTransportTests.class.getResourceAsStream("/certificate.key") + ).trustManager(InsecureTrustManagerFactory.INSTANCE).build().newEngine(NettyAllocator.getAllocator()); + return Optional.of(engine); + } catch (final IOException ex) { + throw new SSLException(ex); + } + } + }; + } + + @After + public void shutdown() throws Exception { + if (threadPool != null) { + threadPool.shutdownNow(); + } + threadPool = null; + networkService = null; + bigArrays = null; + clusterSettings = null; + } + + /** + * Test that {@link ReactorNetty4HttpServerTransport} supports the "Expect: 100-continue" HTTP header + * @throws InterruptedException if the client communication with the server is interrupted + */ + public void testExpectContinueHeader() throws InterruptedException { + final Settings settings = createSettings(); + final int contentLength = randomIntBetween(1, 
HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH.get(settings).bytesAsInt()); + runExpectHeaderTest(settings, HttpHeaderValues.CONTINUE.toString(), contentLength, HttpResponseStatus.CONTINUE); + } + + /** + * Test that {@link ReactorNetty4HttpServerTransport} responds to a + * 100-continue expectation with too large a content-length + * with a 413 status. + * @throws InterruptedException if the client communication with the server is interrupted + */ + public void testExpectContinueHeaderContentLengthTooLong() throws InterruptedException { + final String key = HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH.getKey(); + final int maxContentLength = randomIntBetween(1, 104857600); + final Settings settings = createBuilderWithPort().put(key, maxContentLength + "b").build(); + final int contentLength = randomIntBetween(maxContentLength + 1, Integer.MAX_VALUE); + runExpectHeaderTest(settings, HttpHeaderValues.CONTINUE.toString(), contentLength, HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE); + } + + /** + * Test that {@link ReactorNetty4HttpServerTransport} responds to an unsupported expectation with a 417 status. + * @throws InterruptedException if the client communication with the server is interrupted + */ + public void testExpectUnsupportedExpectation() throws InterruptedException { + Settings settings = createSettings(); + runExpectHeaderTest(settings, "chocolate=yummy", 0, HttpResponseStatus.EXPECTATION_FAILED); + } + + private void runExpectHeaderTest( + final Settings settings, + final String expectation, + final int contentLength, + final HttpResponseStatus expectedStatus + ) throws InterruptedException { + final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { + @Override + public void dispatchRequest(RestRequest request, RestChannel channel, ThreadContext threadContext) { + channel.sendResponse(new BytesRestResponse(OK, BytesRestResponse.TEXT_CONTENT_TYPE, new BytesArray("done"))); + } + + @Override + public void dispatchBadRequest(RestChannel channel, ThreadContext threadContext, Throwable cause) { + logger.error( + new ParameterizedMessage("--> Unexpected bad request [{}]", FakeRestRequest.requestToString(channel.request())), + cause + ); + throw new AssertionError(); + } + }; + try ( + ReactorNetty4HttpServerTransport transport = new ReactorNetty4HttpServerTransport( + settings, + networkService, + bigArrays, + threadPool, + xContentRegistry(), + dispatcher, + clusterSettings, + new SharedGroupFactory(settings), + secureHttpTransportSettingsProvider, + NoopTracer.INSTANCE + ) + ) { + transport.start(); + final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); + try (ReactorHttpClient client = ReactorHttpClient.https()) { + final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/"); + request.headers().set(HttpHeaderNames.EXPECT, expectation); + HttpUtil.setContentLength(request, contentLength); + + // Reactor Netty 4 does not expose 100 CONTINUE response but instead just asks for content + final HttpContent continuationRequest = new DefaultHttpContent(Unpooled.EMPTY_BUFFER); + final FullHttpResponse continuationResponse = client.send(remoteAddress.address(), request, continuationRequest); + try { + assertThat(continuationResponse.status(), is(HttpResponseStatus.OK)); + assertThat(new String(ByteBufUtil.getBytes(continuationResponse.content()), StandardCharsets.UTF_8), is("done")); + } finally { + continuationResponse.release(); + } + } + } + } + + public 
void testBindUnavailableAddress() { + Settings initialSettings = createSettings(); + try ( + ReactorNetty4HttpServerTransport transport = new ReactorNetty4HttpServerTransport( + initialSettings, + networkService, + bigArrays, + threadPool, + xContentRegistry(), + new NullDispatcher(), + clusterSettings, + new SharedGroupFactory(Settings.EMPTY), + secureHttpTransportSettingsProvider, + NoopTracer.INSTANCE + ) + ) { + transport.start(); + TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); + Settings settings = Settings.builder() + .put("http.port", remoteAddress.getPort()) + .put("network.host", remoteAddress.getAddress()) + .build(); + try ( + ReactorNetty4HttpServerTransport otherTransport = new ReactorNetty4HttpServerTransport( + settings, + networkService, + bigArrays, + threadPool, + xContentRegistry(), + new NullDispatcher(), + clusterSettings, + new SharedGroupFactory(settings), + secureHttpTransportSettingsProvider, + NoopTracer.INSTANCE + ) + ) { + BindHttpException bindHttpException = expectThrows(BindHttpException.class, otherTransport::start); + assertEquals("Failed to bind to " + NetworkAddress.format(remoteAddress.address()), bindHttpException.getMessage()); + } + } + } + + public void testBadRequest() throws InterruptedException { + final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { + @Override + public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) { + logger.error("--> Unexpected successful request [{}]", FakeRestRequest.requestToString(request)); + throw new AssertionError(); + } + + @Override + public void dispatchBadRequest(final RestChannel channel, final ThreadContext threadContext, final Throwable cause) { + logger.error("--> Unexpected bad request"); + throw new AssertionError(cause); + } + }; + + final Settings settings; + final int maxInitialLineLength; + final Setting httpMaxInitialLineLengthSetting = HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH; + if (randomBoolean()) { + maxInitialLineLength = httpMaxInitialLineLengthSetting.getDefault(Settings.EMPTY).bytesAsInt(); + settings = createSettings(); + } else { + maxInitialLineLength = randomIntBetween(1, 8192); + settings = createBuilderWithPort().put(httpMaxInitialLineLengthSetting.getKey(), maxInitialLineLength + "b").build(); + } + + try ( + ReactorNetty4HttpServerTransport transport = new ReactorNetty4HttpServerTransport( + settings, + networkService, + bigArrays, + threadPool, + xContentRegistry(), + dispatcher, + clusterSettings, + new SharedGroupFactory(settings), + secureHttpTransportSettingsProvider, + NoopTracer.INSTANCE + ) + ) { + transport.start(); + final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); + + try (ReactorHttpClient client = ReactorHttpClient.https()) { + final String url = "/" + randomAlphaOfLength(maxInitialLineLength); + final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, url); + + final FullHttpResponse response = client.send(remoteAddress.address(), request); + try { + assertThat(response.status(), equalTo(HttpResponseStatus.REQUEST_URI_TOO_LONG)); + assertThat(response.content().array().length, equalTo(0)); + } finally { + response.release(); + } + } + } + } + + public void testDispatchFailed() throws InterruptedException { + final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { + @Override + public void
dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) { + throw new RuntimeException("Bad things happen"); + } + + @Override + public void dispatchBadRequest(final RestChannel channel, final ThreadContext threadContext, final Throwable cause) { + logger.error("--> Unexpected bad request"); + throw new AssertionError(cause); + } + }; + + final Settings settings = createSettings(); + try ( + ReactorNetty4HttpServerTransport transport = new ReactorNetty4HttpServerTransport( + settings, + networkService, + bigArrays, + threadPool, + xContentRegistry(), + dispatcher, + clusterSettings, + new SharedGroupFactory(settings), + secureHttpTransportSettingsProvider, + NoopTracer.INSTANCE + ) + ) { + transport.start(); + final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); + + try (ReactorHttpClient client = ReactorHttpClient.https()) { + final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"); + + final FullHttpResponse response = client.send(remoteAddress.address(), request); + try { + assertThat(response.status(), equalTo(HttpResponseStatus.INTERNAL_SERVER_ERROR)); + assertThat(response.content().array().length, equalTo(0)); + } finally { + response.release(); + } + } + } + } + + public void testLargeCompressedResponse() throws InterruptedException { + final String responseString = randomAlphaOfLength(4 * 1024 * 1024); + final String url = "/thing/"; + final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { + + @Override + public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) { + if (url.equals(request.uri())) { + channel.sendResponse(new BytesRestResponse(OK, responseString)); + } else { + logger.error("--> Unexpected successful uri [{}]", request.uri()); + throw new AssertionError(); + } + } + + @Override + public void dispatchBadRequest(final RestChannel channel, final ThreadContext threadContext, final Throwable cause) { + logger.error( + new ParameterizedMessage("--> Unexpected bad request [{}]", FakeRestRequest.requestToString(channel.request())), + cause + ); + throw new AssertionError(); + } + + }; + + try ( + ReactorNetty4HttpServerTransport transport = new ReactorNetty4HttpServerTransport( + Settings.EMPTY, + networkService, + bigArrays, + threadPool, + xContentRegistry(), + dispatcher, + clusterSettings, + new SharedGroupFactory(Settings.EMPTY), + secureHttpTransportSettingsProvider, + NoopTracer.INSTANCE + ) + ) { + transport.start(); + final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); + + try (ReactorHttpClient client = ReactorHttpClient.https()) { + DefaultFullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, url); + request.headers().add(HttpHeaderNames.ACCEPT_ENCODING, randomFrom("deflate", "gzip")); + long numOfHugeAllocations = getHugeAllocationCount(); + final FullHttpResponse response = client.send(remoteAddress.address(), request); + try { + assertThat(getHugeAllocationCount(), equalTo(numOfHugeAllocations)); + assertThat(response.status(), equalTo(HttpResponseStatus.OK)); + byte[] bytes = new byte[response.content().readableBytes()]; + response.content().readBytes(bytes); + assertThat(new String(bytes, StandardCharsets.UTF_8), equalTo(responseString)); + } finally { + response.release(); + } + } + } + } + + private long getHugeAllocationCount() { + long
numOfHugeAllocations = 0; + ByteBufAllocator allocator = NettyAllocator.getAllocator(); + assert allocator instanceof NettyAllocator.NoDirectBuffers; + ByteBufAllocator delegate = ((NettyAllocator.NoDirectBuffers) allocator).getDelegate(); + if (delegate instanceof PooledByteBufAllocator) { + PooledByteBufAllocatorMetric metric = ((PooledByteBufAllocator) delegate).metric(); + numOfHugeAllocations = metric.heapArenas().stream().mapToLong(PoolArenaMetric::numHugeAllocations).sum(); + } + return numOfHugeAllocations; + } + + public void testCorsRequest() throws InterruptedException { + final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { + + @Override + public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) { + logger.error("--> Unexpected successful request [{}]", FakeRestRequest.requestToString(request)); + throw new AssertionError(); + } + + @Override + public void dispatchBadRequest(final RestChannel channel, final ThreadContext threadContext, final Throwable cause) { + logger.error( + new ParameterizedMessage("--> Unexpected bad request [{}]", FakeRestRequest.requestToString(channel.request())), + cause + ); + throw new AssertionError(); + } + + }; + + final Settings settings = createBuilderWithPort().put(SETTING_CORS_ENABLED.getKey(), true) + .put(SETTING_CORS_ALLOW_ORIGIN.getKey(), "test-cors.org") + .build(); + + try ( + ReactorNetty4HttpServerTransport transport = new ReactorNetty4HttpServerTransport( + settings, + networkService, + bigArrays, + threadPool, + xContentRegistry(), + dispatcher, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + new SharedGroupFactory(settings), + secureHttpTransportSettingsProvider, + NoopTracer.INSTANCE + ) + ) { + transport.start(); + final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); + + // Test pre-flight request + try (ReactorHttpClient client = ReactorHttpClient.https()) { + final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.OPTIONS, "/"); + request.headers().add(CorsHandler.ORIGIN, "test-cors.org"); + request.headers().add(CorsHandler.ACCESS_CONTROL_REQUEST_METHOD, "POST"); + + final FullHttpResponse response = client.send(remoteAddress.address(), request); + try { + assertThat(response.status(), equalTo(HttpResponseStatus.OK)); + assertThat(response.headers().get(CorsHandler.ACCESS_CONTROL_ALLOW_ORIGIN), equalTo("test-cors.org")); + assertThat(response.headers().get(CorsHandler.VARY), equalTo(CorsHandler.ORIGIN)); + assertTrue(response.headers().contains(CorsHandler.DATE)); + } finally { + response.release(); + } + } + + // Test short-circuited request + try (ReactorHttpClient client = ReactorHttpClient.https()) { + final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"); + request.headers().add(CorsHandler.ORIGIN, "google.com"); + + final FullHttpResponse response = client.send(remoteAddress.address(), request); + try { + assertThat(response.status(), equalTo(HttpResponseStatus.FORBIDDEN)); + } finally { + response.release(); + } + } + } + } + + public void testConnectTimeout() throws Exception { + final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { + + @Override + public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) { + logger.error("--> Unexpected successful request [{}]",
FakeRestRequest.requestToString(request)); + throw new AssertionError("Should not have received a dispatched request"); + } + + @Override + public void dispatchBadRequest(final RestChannel channel, final ThreadContext threadContext, final Throwable cause) { + logger.error( + new ParameterizedMessage("--> Unexpected bad request [{}]", FakeRestRequest.requestToString(channel.request())), + cause + ); + throw new AssertionError("Should not have received a dispatched request"); + } + + }; + + Settings settings = createBuilderWithPort().put( + HttpTransportSettings.SETTING_HTTP_CONNECT_TIMEOUT.getKey(), + new TimeValue(randomIntBetween(100, 300)) + ).build(); + + NioEventLoopGroup group = new NioEventLoopGroup(); + try ( + ReactorNetty4HttpServerTransport transport = new ReactorNetty4HttpServerTransport( + settings, + networkService, + bigArrays, + threadPool, + xContentRegistry(), + dispatcher, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + new SharedGroupFactory(settings), + secureHttpTransportSettingsProvider, + NoopTracer.INSTANCE + ) + ) { + transport.start(); + final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); + + final CountDownLatch channelClosedLatch = new CountDownLatch(1); + + final Bootstrap clientBootstrap = new Bootstrap().option(ChannelOption.ALLOCATOR, NettyAllocator.getAllocator()) + .channel(NioSocketChannel.class) + .handler(new ChannelInitializer() { + + @Override + protected void initChannel(SocketChannel ch) { + ch.pipeline().addLast(new ChannelHandlerAdapter() { + }); + + } + }) + .group(group); + ChannelFuture connect = clientBootstrap.connect(remoteAddress.address()); + connect.channel().closeFuture().addListener(future -> channelClosedLatch.countDown()); + + assertTrue("Channel should be closed due to connect timeout", channelClosedLatch.await(1, TimeUnit.MINUTES)); + + } finally { + group.shutdownGracefully().await(); + } + } + + private Settings createSettings() { + return createBuilderWithPort().build(); + } + + private Settings.Builder createBuilderWithPort() { + return Settings.builder().put(HttpTransportSettings.SETTING_HTTP_PORT.getKey(), getPortRange()); + } +} diff --git a/plugins/transport-reactor-netty4/src/test/resources/README.txt b/plugins/transport-reactor-netty4/src/test/resources/README.txt new file mode 100644 index 0000000000000..a4353cee45a97 --- /dev/null +++ b/plugins/transport-reactor-netty4/src/test/resources/README.txt @@ -0,0 +1,14 @@ +#!/usr/bin/env bash +# +# This README describes how the certificates in this directory were created. +# This file can also be executed as a script +# + +# 1. Create the private key and a self-signed certificate + +openssl req -x509 -sha256 -newkey rsa:2048 -keyout certificate.key -out certificate.crt -days 1024 -nodes + +# 2.
Export the certificate in pkcs12 format + +openssl pkcs12 -export -in certificate.crt -inkey certificate.key -out server.p12 -name netty4-secure -password pass:password + diff --git a/plugins/transport-reactor-netty4/src/test/resources/certificate.crt b/plugins/transport-reactor-netty4/src/test/resources/certificate.crt new file mode 100644 index 0000000000000..54c78fdbcf6de --- /dev/null +++ b/plugins/transport-reactor-netty4/src/test/resources/certificate.crt @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDkzCCAnugAwIBAgIUddAawr5zygcd+Dcn9WVDpO4BJ7YwDQYJKoZIhvcNAQEL +BQAwWTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM +GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDESMBAGA1UEAwwJbG9jYWxob3N0MB4X +DTI0MDMxNDE5NDQzOVoXDTI3MDEwMjE5NDQzOVowWTELMAkGA1UEBhMCQVUxEzAR +BgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoMGEludGVybmV0IFdpZGdpdHMgUHR5 +IEx0ZDESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAzjOKkg6Iba5zfZ8b/RYw+PGmGEfbdGuuF10Wz4Jmx/Nk4VfDLxdh +TW8VllUL2JD7uPkjABj7pW3awAbvIJ+VGbKqfBr1Nsz0mPPzhT8cfuMH/FDZgQs3 +4HuqDKr0LfC1Kw5E3WF0GVMBDNu0U+nKoeqySeYjGdxDnd3W4cqK5AnUxL0RnIny +Bw7ZuhcU55XndH/Xauro/2EpvJduDsWMdqt7ZfIf1TOmaiQHK+82yb/drVaJbczK +uTpn1Kv2bnzkQEckgq+z1dLNOOyvP2xf+nsziw5ilJe92e5GJOUJYFAlEgUAGpfD +dv6j/gTRYvdJCJItOQEQtektNCAZsoc0wwIDAQABo1MwUTAdBgNVHQ4EFgQUzHts +wIt+zhB/R4U4Do2P6rr0YhkwHwYDVR0jBBgwFoAUzHtswIt+zhB/R4U4Do2P6rr0 +YhkwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAveh870jJX7vt +oLCrdugsyo79pR4f7Nr1kUy3jJrfoaoUmrjiiiHWgT22fGwp7j1GZF2mVfo8YVaK +63YNn5gB2NNZhguPOFC4AdvHRYOKRBOaOvWK8oq7BcJ//18JYI/pPnpgkYvJjqv4 +gFKaZX9qWtujHpAmKiVGs7pwYGNXfixPHRNV4owcfHMIH5dhbbqT49j94xVpjbXs +OymKtFl4kpCE/0LzKFrFcuu55Am1VLBHx2cPpHLOipgUcF5BHFlQ8AXiCMOwfPAw +d22mLB6Gt1oVEpyvQHYd3e04FetEXQ9E8T+NKWZx/8Ucf+IWBYmZBRxch6O83xgk +bAbGzqkbzQ== +-----END CERTIFICATE----- diff --git a/plugins/transport-reactor-netty4/src/test/resources/certificate.key b/plugins/transport-reactor-netty4/src/test/resources/certificate.key new file mode 100644 index 0000000000000..228350180935d --- /dev/null +++ b/plugins/transport-reactor-netty4/src/test/resources/certificate.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDOM4qSDohtrnN9 +nxv9FjD48aYYR9t0a64XXRbPgmbH82ThV8MvF2FNbxWWVQvYkPu4+SMAGPulbdrA +Bu8gn5UZsqp8GvU2zPSY8/OFPxx+4wf8UNmBCzfge6oMqvQt8LUrDkTdYXQZUwEM +27RT6cqh6rJJ5iMZ3EOd3dbhyorkCdTEvRGcifIHDtm6FxTnled0f9dq6uj/YSm8 +l24OxYx2q3tl8h/VM6ZqJAcr7zbJv92tVoltzMq5OmfUq/ZufORARySCr7PV0s04 +7K8/bF/6ezOLDmKUl73Z7kYk5QlgUCUSBQAal8N2/qP+BNFi90kIki05ARC16S00 +IBmyhzTDAgMBAAECggEAVOdiElvLjyX6xeoC00YU6hxOIMdNtHU2HMamwtDV01UD +38mMQ9KjrQelYt4n34drLrHe2IZw75/5J4JzagJrmUY47psHBwaDXItuZRokeJaw +zhLYTEs7OcKRtV+a5WOspUrdzi33aQoFb67zZG3qkpsZyFXrdBV+/fy/Iv+MCvLH +xR0jQ5mzE3cw20R7S4nddChBA/y8oKGOo6QRf2SznC1jL/+yolHvJPEn1v8AUxYm +BMPHxj1O0c4M4IxnJQ3Y5Jy9OaFMyMsFlF1hVhc/3LDDxDyOuBsVsFDicojyrRea +GKngIke0yezy7Wo4NUcp8YQhafonpWVsSJJdOUotcQKBgQD0rihFBXVtcG1d/Vy7 +FvLHrmccD56JNV744LSn2CDM7W1IulNbDUZINdCFqL91u5LpxozeE1FPY1nhwncJ +N7V7XYCaSLCuV1YJzRmUCjnzk2RyopGpzWog3f9uUFGgrk1HGbNAv99k/REya6Iu +IRSkuQhaJOj3bRXzonh0K4GjewKBgQDXvamtCioOUMSP8vq919YMkBw7F+z/fr0p +pamO8HL9eewAUg6N92JQ9kobSo/GptdmdHIjs8LqnS5C3H13GX5Qlf5GskOlCpla +V55ElaSp0gvKwWE168U7gQH4etPQAXXJrOGFaGbPj9W81hTUud7HVE88KYdfWTBo +I7TuE25tWQKBgBRjcr2Vn9xXsvVTCGgamG5lLPhcoNREGz7X0pXt34XT/vhBdnKu +331i5pZMom+YCrzqK5DRwUPBPpseTjb5amj2OKIijn5ojqXQbmI0m/GdBZC71TF2 +CXLlrMQvcy3VeGEFVjd+BYpvwAAYkfIQFZ1IQdbpHnSHpX2guzLK8UmDAoGBANUy +PIcf0EetUVHfkCIjNQfdMcjD8BTcLhsF9vWmcDxFTA9VB8ULf0D64mjt2f85yQsa 
+b+EQN8KZ6alxMxuLOeRxFYLPj0F9o+Y/R8wHBV48kCKhz2r1v0b6SfQ/jSm1B61x +BrxLW64qOdIOzS8bLyhUDKkrcPesr8V548aRtUKhAoGBAKlNJFd8BCGKD9Td+3dE +oP1iHTX5XZ+cQIqL0e+GMQlK4HnQP566DFZU5/GHNNAfmyxd5iSRwhTqPMHRAmOb +pqQwsyufx0dFeIBxeSO3Z6jW5h2sl4nBipZpw9bzv6EBL1xRr0SfMNZzdnf4JFzc +0htGo/VO93Z2pv8w7uGUz1nN +-----END PRIVATE KEY----- diff --git a/server/build.gradle b/server/build.gradle index cb48142a61159..a076a6bee36bf 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -411,14 +411,21 @@ tasks.register("downloadSnapshot", Copy) { def groupId = "org.opensearch" def artifactId = "opensearch" - repositories { - maven { - url mavenSnapshotRepoUrl - } - } + def repos = project.getRepositories(); + MavenArtifactRepository opensearchRepo = repos.maven(repo -> { + repo.setName("opensearch-snapshots"); + repo.setUrl(mavenSnapshotRepoUrl); + }); + + repos.exclusiveContent(exclusiveRepo -> { + exclusiveRepo.filter(descriptor -> descriptor.includeGroup(groupId)); + exclusiveRepo.forRepositories(opensearchRepo); + }); configurations { - snapshotArtifact + snapshotArtifact { + exclude group: 'org.apache.lucene' + } } dependencies { diff --git a/server/licenses/joda-time-2.12.2.jar.sha1 b/server/licenses/joda-time-2.12.2.jar.sha1 deleted file mode 100644 index 6e9b28eb35597..0000000000000 --- a/server/licenses/joda-time-2.12.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -78e18a7b4180e911dafba0a412adfa82c1e3d14b \ No newline at end of file diff --git a/server/licenses/joda-time-2.12.7.jar.sha1 b/server/licenses/joda-time-2.12.7.jar.sha1 new file mode 100644 index 0000000000000..7ce5c501873c0 --- /dev/null +++ b/server/licenses/joda-time-2.12.7.jar.sha1 @@ -0,0 +1 @@ +d015b997eccd511e5567218a51651ff0625f6f25 \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-analysis-common-9.11.0-snapshot-8a555eb.jar.sha1 deleted file mode 100644 index 9cab77f4e7394..0000000000000 --- a/server/licenses/lucene-analysis-common-9.11.0-snapshot-8a555eb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6f0cfa07a5e4b36423e398cd1fd51c6825773d9c \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.11.0-snapshot-fb97840.jar.sha1 b/server/licenses/lucene-analysis-common-9.11.0-snapshot-fb97840.jar.sha1 new file mode 100644 index 0000000000000..f229c373aa1af --- /dev/null +++ b/server/licenses/lucene-analysis-common-9.11.0-snapshot-fb97840.jar.sha1 @@ -0,0 +1 @@ +cd0afb5da5dcb4c7498bd1ee7f7bab0e289404b8 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-backward-codecs-9.11.0-snapshot-8a555eb.jar.sha1 deleted file mode 100644 index 2d5d1a281a0f0..0000000000000 --- a/server/licenses/lucene-backward-codecs-9.11.0-snapshot-8a555eb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ab201b997c8449db1ecd2fa88bd42d2f457286fa \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.11.0-snapshot-fb97840.jar.sha1 b/server/licenses/lucene-backward-codecs-9.11.0-snapshot-fb97840.jar.sha1 new file mode 100644 index 0000000000000..edaf28a7f6e76 --- /dev/null +++ b/server/licenses/lucene-backward-codecs-9.11.0-snapshot-fb97840.jar.sha1 @@ -0,0 +1 @@ +88888315cd60e565960ae2e6fed2af0df077a2a2 \ No newline at end of file diff --git a/server/licenses/lucene-core-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-core-9.11.0-snapshot-8a555eb.jar.sha1 deleted file mode 100644 index 25beb34873c0c..0000000000000 --- 
a/server/licenses/lucene-core-9.11.0-snapshot-8a555eb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fe1cf5663be8bdb6aa757fd4101bc551684c90fb \ No newline at end of file diff --git a/server/licenses/lucene-core-9.11.0-snapshot-fb97840.jar.sha1 b/server/licenses/lucene-core-9.11.0-snapshot-fb97840.jar.sha1 new file mode 100644 index 0000000000000..88309bc46411a --- /dev/null +++ b/server/licenses/lucene-core-9.11.0-snapshot-fb97840.jar.sha1 @@ -0,0 +1 @@ +53a828e3e88f55c83979cd3df0704617cc9edb9a \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-grouping-9.11.0-snapshot-8a555eb.jar.sha1 deleted file mode 100644 index 27d4f0f5874e9..0000000000000 --- a/server/licenses/lucene-grouping-9.11.0-snapshot-8a555eb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a988f92842e48195c75a49377432533c9170d93d \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.11.0-snapshot-fb97840.jar.sha1 b/server/licenses/lucene-grouping-9.11.0-snapshot-fb97840.jar.sha1 new file mode 100644 index 0000000000000..13f1276e3b033 --- /dev/null +++ b/server/licenses/lucene-grouping-9.11.0-snapshot-fb97840.jar.sha1 @@ -0,0 +1 @@ +d1f54a816c9d85e890a862a2dffdc734ece2770c \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-highlighter-9.11.0-snapshot-8a555eb.jar.sha1 deleted file mode 100644 index 2545822f2ac7b..0000000000000 --- a/server/licenses/lucene-highlighter-9.11.0-snapshot-8a555eb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7c669e2c01565d3bdf175cd61a1e4d0bdfc44311 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.11.0-snapshot-fb97840.jar.sha1 b/server/licenses/lucene-highlighter-9.11.0-snapshot-fb97840.jar.sha1 new file mode 100644 index 0000000000000..6cef51ac4453f --- /dev/null +++ b/server/licenses/lucene-highlighter-9.11.0-snapshot-fb97840.jar.sha1 @@ -0,0 +1 @@ +1e7c9336fa86fb866fcd76ea5d6283c804b4d580 \ No newline at end of file diff --git a/server/licenses/lucene-join-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-join-9.11.0-snapshot-8a555eb.jar.sha1 deleted file mode 100644 index fe44ad772335f..0000000000000 --- a/server/licenses/lucene-join-9.11.0-snapshot-8a555eb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -633a6d44b4cde8e149daa3407e8b8f644eece951 \ No newline at end of file diff --git a/server/licenses/lucene-join-9.11.0-snapshot-fb97840.jar.sha1 b/server/licenses/lucene-join-9.11.0-snapshot-fb97840.jar.sha1 new file mode 100644 index 0000000000000..2524ac35c2afe --- /dev/null +++ b/server/licenses/lucene-join-9.11.0-snapshot-fb97840.jar.sha1 @@ -0,0 +1 @@ +17be4fc1f9feca0dac84a37d54dca4b32df4c619 \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-memory-9.11.0-snapshot-8a555eb.jar.sha1 deleted file mode 100644 index 85bfbe066ff56..0000000000000 --- a/server/licenses/lucene-memory-9.11.0-snapshot-8a555eb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -25390259c9e5592354efbc2f250bb396402016b2 \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.11.0-snapshot-fb97840.jar.sha1 b/server/licenses/lucene-memory-9.11.0-snapshot-fb97840.jar.sha1 new file mode 100644 index 0000000000000..f5ef377300839 --- /dev/null +++ b/server/licenses/lucene-memory-9.11.0-snapshot-fb97840.jar.sha1 @@ -0,0 +1 @@ +7350675a2cf386c0f003b667b61db614f03bb984 \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.11.0-snapshot-8a555eb.jar.sha1 
b/server/licenses/lucene-misc-9.11.0-snapshot-8a555eb.jar.sha1 deleted file mode 100644 index 4dba5837b66de..0000000000000 --- a/server/licenses/lucene-misc-9.11.0-snapshot-8a555eb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -056f87a3d30c223b08d2f45fe465ddf11210b85f \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.11.0-snapshot-fb97840.jar.sha1 b/server/licenses/lucene-misc-9.11.0-snapshot-fb97840.jar.sha1 new file mode 100644 index 0000000000000..e94fcf0f259a1 --- /dev/null +++ b/server/licenses/lucene-misc-9.11.0-snapshot-fb97840.jar.sha1 @@ -0,0 +1 @@ +c5c8bd120d5985ab6bd4e5f89efe08c226c0a323 \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-queries-9.11.0-snapshot-8a555eb.jar.sha1 deleted file mode 100644 index b6b8c441eefb1..0000000000000 --- a/server/licenses/lucene-queries-9.11.0-snapshot-8a555eb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e640f850b4fb13190be8422fe74c14c9d6603bb5 \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.11.0-snapshot-fb97840.jar.sha1 b/server/licenses/lucene-queries-9.11.0-snapshot-fb97840.jar.sha1 new file mode 100644 index 0000000000000..fc80394195fa9 --- /dev/null +++ b/server/licenses/lucene-queries-9.11.0-snapshot-fb97840.jar.sha1 @@ -0,0 +1 @@ +2e1975ac26e9172722f734bf0f5583317e5eb16a \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-queryparser-9.11.0-snapshot-8a555eb.jar.sha1 deleted file mode 100644 index d0e77b04db51a..0000000000000 --- a/server/licenses/lucene-queryparser-9.11.0-snapshot-8a555eb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8eb57762bf408fa51d7511f5e3b917627be61d1d \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.11.0-snapshot-fb97840.jar.sha1 b/server/licenses/lucene-queryparser-9.11.0-snapshot-fb97840.jar.sha1 new file mode 100644 index 0000000000000..10ef577bc1bdc --- /dev/null +++ b/server/licenses/lucene-queryparser-9.11.0-snapshot-fb97840.jar.sha1 @@ -0,0 +1 @@ +c8cf3d5dd4d0538b38e4e88bb865bc59d835d887 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-sandbox-9.11.0-snapshot-8a555eb.jar.sha1 deleted file mode 100644 index 179df9f07a594..0000000000000 --- a/server/licenses/lucene-sandbox-9.11.0-snapshot-8a555eb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -615b4a90c5402084c2d5916a4c1fadc9d9177782 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.11.0-snapshot-fb97840.jar.sha1 b/server/licenses/lucene-sandbox-9.11.0-snapshot-fb97840.jar.sha1 new file mode 100644 index 0000000000000..08a61ba30bc0d --- /dev/null +++ b/server/licenses/lucene-sandbox-9.11.0-snapshot-fb97840.jar.sha1 @@ -0,0 +1 @@ +5a3a7a138ff4978f3ddb186d9786e6cb4793b291 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-spatial-extras-9.11.0-snapshot-8a555eb.jar.sha1 deleted file mode 100644 index 9b88b24c21b12..0000000000000 --- a/server/licenses/lucene-spatial-extras-9.11.0-snapshot-8a555eb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -19b42cdb5f76f63dece3ef5128207ebdd3741d48 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.11.0-snapshot-fb97840.jar.sha1 b/server/licenses/lucene-spatial-extras-9.11.0-snapshot-fb97840.jar.sha1 new file mode 100644 index 0000000000000..a244219c1de60 --- /dev/null +++ 
b/server/licenses/lucene-spatial-extras-9.11.0-snapshot-fb97840.jar.sha1 @@ -0,0 +1 @@ +dc71e0125c66d29a1bffc1ddeab4b96526e737c8 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-spatial3d-9.11.0-snapshot-8a555eb.jar.sha1 deleted file mode 100644 index 789ab1d52ea8c..0000000000000 --- a/server/licenses/lucene-spatial3d-9.11.0-snapshot-8a555eb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -882691fe917e716fe6bcf8c0dd984b153495d015 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.11.0-snapshot-fb97840.jar.sha1 b/server/licenses/lucene-spatial3d-9.11.0-snapshot-fb97840.jar.sha1 new file mode 100644 index 0000000000000..d2b3821bbf5f6 --- /dev/null +++ b/server/licenses/lucene-spatial3d-9.11.0-snapshot-fb97840.jar.sha1 @@ -0,0 +1 @@ +aef4c04d353092a438eee302521fe34188b7c4df \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-suggest-9.11.0-snapshot-8a555eb.jar.sha1 deleted file mode 100644 index 8cfaf60763724..0000000000000 --- a/server/licenses/lucene-suggest-9.11.0-snapshot-8a555eb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ef6d483960f776d5dbdd1009863786ee09ba5707 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.11.0-snapshot-fb97840.jar.sha1 b/server/licenses/lucene-suggest-9.11.0-snapshot-fb97840.jar.sha1 new file mode 100644 index 0000000000000..2c147e4651a44 --- /dev/null +++ b/server/licenses/lucene-suggest-9.11.0-snapshot-fb97840.jar.sha1 @@ -0,0 +1 @@ +86f68cacd85f99b4ddcda3aff7c873349ba59381 \ No newline at end of file diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteDualReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteDualReplicationIT.java index 34b60d5f3e9b3..e316bae5d8ebc 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteDualReplicationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteDualReplicationIT.java @@ -41,6 +41,7 @@ public class RemoteDualReplicationIT extends MigrationBaseTestCase { private final String REMOTE_PRI_DOCREP_REP = "remote-primary-docrep-replica"; private final String REMOTE_PRI_DOCREP_REMOTE_REP = "remote-primary-docrep-remote-replica"; private final String FAILOVER_REMOTE_TO_DOCREP = "failover-remote-to-docrep"; + private final String FAILOVER_REMOTE_TO_REMOTE = "failover-remote-to-remote"; @Override protected Collection> nodePlugins() { @@ -241,14 +242,63 @@ RLs on remote enabled copies are brought up to (GlobalCkp + 1) upon a flush requ */ extraSettings = Settings.builder().put(IndexingMemoryController.SHARD_INACTIVE_TIME_SETTING.getKey(), "3s").build(); testRemotePrimaryDocRepAndRemoteReplica(); - DiscoveryNodes nodes = internalCluster().client().admin().cluster().prepareState().get().getState().getNodes(); - assertBusy(() -> { - for (ShardStats shardStats : internalCluster().client() + pollAndCheckRetentionLeases(REMOTE_PRI_DOCREP_REMOTE_REP); + } + + public void testMissingRetentionLeaseCreatedOnFailedOverRemoteReplica() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + + logger.info("---> Starting docrep data node"); + internalCluster().startDataOnlyNode(); + + Settings zeroReplicasAndOverriddenSyncIntervals = Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "100ms") +
.put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), "100ms") + .build(); + createIndex(FAILOVER_REMOTE_TO_REMOTE, zeroReplicasAndOverriddenSyncIntervals); + ensureGreen(FAILOVER_REMOTE_TO_REMOTE); + + indexBulk(FAILOVER_REMOTE_TO_REMOTE, 100); + + logger.info("---> Starting first remote node"); + initDocRepToRemoteMigration(); + addRemote = true; + String firstRemoteNode = internalCluster().startDataOnlyNode(); + String primaryShardHostingNode = primaryNodeName(FAILOVER_REMOTE_TO_REMOTE); + logger.info("---> Moving primary copy from {} to remote enabled node {}", primaryShardHostingNode, firstRemoteNode); + assertAcked( + internalCluster().client() + .admin() + .cluster() + .prepareReroute() + .add(new MoveAllocationCommand(FAILOVER_REMOTE_TO_REMOTE, 0, primaryShardHostingNode, firstRemoteNode)) + .get() + ); + ensureGreen(FAILOVER_REMOTE_TO_REMOTE); + assertReplicaAndPrimaryConsistency(FAILOVER_REMOTE_TO_REMOTE, 100, 0); + + String secondRemoteNode = internalCluster().startDataOnlyNode(); + Settings twoReplicas = Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 2).build(); + assertAcked( + internalCluster().client() .admin() .indices() - .prepareStats(REMOTE_PRI_DOCREP_REMOTE_REP) + .prepareUpdateSettings() + .setIndices(FAILOVER_REMOTE_TO_REMOTE) + .setSettings(twoReplicas) .get() - .getShards()) { + ); + ensureGreen(FAILOVER_REMOTE_TO_REMOTE); + + logger.info("---> Checking retention leases"); + pollAndCheckRetentionLeases(FAILOVER_REMOTE_TO_REMOTE); + } + + private void pollAndCheckRetentionLeases(String indexName) throws Exception { + DiscoveryNodes nodes = internalCluster().client().admin().cluster().prepareState().get().getState().getNodes(); + assertBusy(() -> { + for (ShardStats shardStats : internalCluster().client().admin().indices().prepareStats(indexName).get().getShards()) { ShardRouting shardRouting = shardStats.getShardRouting(); DiscoveryNode discoveryNode = nodes.get(shardRouting.currentNodeId()); RetentionLeases retentionLeases = shardStats.getRetentionLeaseStats().retentionLeases(); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java index d34a5f4edbaec..95b7d4381da18 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java @@ -59,7 +59,7 @@ import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.TRANSLOG; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING; +import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -229,7 +229,7 @@ public void testRemoteStoreCustomDataOnIndexCreationAndRestore() { client(clusterManagerNode).admin() .cluster() .prepareUpdateSettings() - .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), PathType.FIXED)) + .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.FIXED)) .get(); createRepository(snapshotRepoName,
"fs", getRepositorySettings(absolutePath1, true)); Client client = client(); @@ -260,7 +260,7 @@ public void testRemoteStoreCustomDataOnIndexCreationAndRestore() { client(clusterManagerNode).admin() .cluster() .prepareUpdateSettings() - .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX)) + .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX)) .get(); restoreSnapshotResponse = client.admin() @@ -272,13 +272,13 @@ public void testRemoteStoreCustomDataOnIndexCreationAndRestore() { .get(); assertEquals(RestStatus.ACCEPTED, restoreSnapshotResponse.status()); ensureGreen(restoredIndexName1version2); - validatePathType(restoredIndexName1version2, PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A); + validatePathType(restoredIndexName1version2, PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A_COMPOSITE_1); - // Create index with cluster setting cluster.remote_store.index.path.prefix.type as hashed_prefix. + // Create index with cluster setting cluster.remote_store.index.path.type as hashed_prefix. indexSettings = getIndexSettings(1, 0).build(); createIndex(indexName2, indexSettings); ensureGreen(indexName2); - validatePathType(indexName2, PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A); + validatePathType(indexName2, PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A_COMPOSITE_1); // Validating that custom data has not changed for indexes which were created before the cluster setting got updated validatePathType(indexName1, PathType.FIXED); @@ -294,7 +294,7 @@ public void testRemoteStoreCustomDataOnIndexCreationAndRestore() { client(clusterManagerNode).admin() .cluster() .prepareUpdateSettings() - .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), PathType.FIXED)) + .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.FIXED)) .get(); // Close index 2 @@ -309,7 +309,7 @@ public void testRemoteStoreCustomDataOnIndexCreationAndRestore() { ensureGreen(indexName2); // Validating that custom data has not changed for testindex2 which was created before the cluster setting got updated - validatePathType(indexName2, PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A); + validatePathType(indexName2, PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A_COMPOSITE_1); } private void validatePathType(String index, PathType pathType) { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/FilterRewriteIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/FilterRewriteIT.java index e051265d4b3bc..3f32cf3bfbbe7 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/FilterRewriteIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/FilterRewriteIT.java @@ -10,6 +10,7 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; @@ -34,6 +35,7 @@ import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.opensearch.search.SearchService.MAX_AGGREGATION_REWRITE_FILTERS; 
import static org.opensearch.search.aggregations.AggregationBuilders.dateHistogram; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @@ -93,7 +95,7 @@ public void testMinDocCountOnDateHistogram() throws Exception { final SearchResponse allResponse = client().prepareSearch("idx") .setSize(0) .setQuery(QUERY) - .addAggregation(dateHistogram("histo").field("date").dateHistogramInterval(DateHistogramInterval.DAY).minDocCount(0)) + .addAggregation(dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.DAY).minDocCount(0)) .get(); final Histogram allHisto = allResponse.getAggregations().get("histo"); @@ -104,4 +106,36 @@ public void testMinDocCountOnDateHistogram() throws Exception { assertEquals(entry.getValue(), results.get(entry.getKey())); } } + + public void testDisableOptimizationGivesSameResults() throws Exception { + SearchResponse response = client().prepareSearch("idx") + .setSize(0) + .addAggregation(dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.DAY).minDocCount(0)) + .get(); + + final Histogram allHisto1 = response.getAggregations().get("histo"); + + final ClusterUpdateSettingsResponse updateSettingResponse = client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(MAX_AGGREGATION_REWRITE_FILTERS.getKey(), 0)) + .get(); + + assertEquals(updateSettingResponse.getTransientSettings().get(MAX_AGGREGATION_REWRITE_FILTERS.getKey()), "0"); + + response = client().prepareSearch("idx") + .setSize(0) + .addAggregation(dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.DAY).minDocCount(0)) + .get(); + + final Histogram allHisto2 = response.getAggregations().get("histo"); + + assertEquals(allHisto1, allHisto2); + + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().putNull(MAX_AGGREGATION_REWRITE_FILTERS.getKey())) + .get(); + } } diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index ef74a794a9975..2904d49c224d7 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -525,6 +525,7 @@ public void apply(Settings value, Settings current, Settings previous) { SearchService.MAX_OPEN_SCROLL_CONTEXT, SearchService.MAX_OPEN_PIT_CONTEXT, SearchService.MAX_PIT_KEEPALIVE_SETTING, + SearchService.MAX_AGGREGATION_REWRITE_FILTERS, CreatePitController.PIT_INIT_KEEP_ALIVE, Node.WRITE_PORTS_FILE_SETTING, Node.NODE_NAME_SETTING, @@ -712,7 +713,8 @@ public void apply(Settings value, Settings current, Settings previous) { RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING, IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING, IndicesService.CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING, - IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING, + IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING, + IndicesService.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING, // Admission Control Settings AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE, diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java b/server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java index b51abf19fc000..9acf390c6b707 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java +++ 
b/server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java @@ -23,6 +23,8 @@ import static java.util.Collections.unmodifiableMap; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA; +import static org.opensearch.index.remote.RemoteStoreUtils.longToCompositeBase64AndBinaryEncoding; +import static org.opensearch.index.remote.RemoteStoreUtils.longToUrlBase64; /** * This class contains the different enums related to remote store like data categories and types, path types @@ -216,13 +218,26 @@ public static PathType parseString(String pathType) { @PublicApi(since = "2.14.0") public enum PathHashAlgorithm { - FNV_1A(0) { + FNV_1A_BASE64(0) { @Override String hash(PathInput pathInput) { String input = pathInput.indexUUID() + pathInput.shardId() + pathInput.dataCategory().getName() + pathInput.dataType() .getName(); long hash = FNV1a.hash64(input); - return RemoteStoreUtils.longToUrlBase64(hash); + return longToUrlBase64(hash); + } + }, + /** + * This hash algorithm will generate a hash value which will use the first 6 bits to create a base64 character and the next 14 + * bits to create a binary string. + */ + FNV_1A_COMPOSITE_1(1) { + @Override + String hash(PathInput pathInput) { + String input = pathInput.indexUUID() + pathInput.shardId() + pathInput.dataCategory().getName() + pathInput.dataType() + .getName(); + long hash = FNV1a.hash64(input); + return longToCompositeBase64AndBinaryEncoding(hash, 20); + } + }; diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java index 5b067115df781..f6925bcbcc92d 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java @@ -25,12 +25,16 @@ public class RemoteStorePathStrategyResolver { private volatile PathType type; + private volatile PathHashAlgorithm hashAlgorithm; + private final Supplier minNodeVersionSupplier; public RemoteStorePathStrategyResolver(ClusterSettings clusterSettings, Supplier minNodeVersionSupplier) { this.minNodeVersionSupplier = minNodeVersionSupplier; - type = clusterSettings.get(IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING); - clusterSettings.addSettingsUpdateConsumer(IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING, this::setType); + type = clusterSettings.get(IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING); + hashAlgorithm = clusterSettings.get(IndicesService.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING); + clusterSettings.addSettingsUpdateConsumer(IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING, this::setType); + clusterSettings.addSettingsUpdateConsumer(IndicesService.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING, this::setHashAlgorithm); } public RemoteStorePathStrategy get() { @@ -39,11 +43,15 @@ public RemoteStorePathStrategy get() { // Min node version check ensures that we are enabling the new prefix type only when all the nodes understand it. pathType = Version.CURRENT.compareTo(minNodeVersionSupplier.get()) <= 0 ? type : PathType.FIXED; // If the path type is fixed, hash algorithm is not applicable. - pathHashAlgorithm = pathType == PathType.FIXED ? null : PathHashAlgorithm.FNV_1A; + pathHashAlgorithm = pathType == PathType.FIXED ?
diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java index 5b067115df781..f6925bcbcc92d 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java @@ -25,12 +25,16 @@ public class RemoteStorePathStrategyResolver { private volatile PathType type; + private volatile PathHashAlgorithm hashAlgorithm; + private final Supplier<Version> minNodeVersionSupplier; public RemoteStorePathStrategyResolver(ClusterSettings clusterSettings, Supplier<Version> minNodeVersionSupplier) { this.minNodeVersionSupplier = minNodeVersionSupplier; - type = clusterSettings.get(IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING); - clusterSettings.addSettingsUpdateConsumer(IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING, this::setType); + type = clusterSettings.get(IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING); + hashAlgorithm = clusterSettings.get(IndicesService.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING); + clusterSettings.addSettingsUpdateConsumer(IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING, this::setType); + clusterSettings.addSettingsUpdateConsumer(IndicesService.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING, this::setHashAlgorithm); } public RemoteStorePathStrategy get() { @@ -39,11 +43,15 @@ public RemoteStorePathStrategy get() { // Min node version check ensures that we are enabling the new prefix type only when all the nodes understand it. pathType = Version.CURRENT.compareTo(minNodeVersionSupplier.get()) <= 0 ? type : PathType.FIXED; // If the path type is fixed, hash algorithm is not applicable. - pathHashAlgorithm = pathType == PathType.FIXED ? null : PathHashAlgorithm.FNV_1A; + pathHashAlgorithm = pathType == PathType.FIXED ? null : hashAlgorithm; return new RemoteStorePathStrategy(pathType, pathHashAlgorithm); } private void setType(PathType type) { this.type = type; } + + private void setHashAlgorithm(PathHashAlgorithm hashAlgorithm) { + this.hashAlgorithm = hashAlgorithm; + } } diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java b/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java index 7d0743e70b6cb..4d1d98334c3c4 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java @@ -15,6 +15,7 @@ import java.util.Base64; import java.util.HashMap; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.function.Function; @@ -26,10 +27,16 @@ public class RemoteStoreUtils { public static final int LONG_MAX_LENGTH = String.valueOf(Long.MAX_VALUE).length(); + /** + * URL safe base64 character set. This must not be changed as it is used in deriving the base64 equivalent of binary. + */ + static final char[] URL_BASE64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_".toCharArray(); + /** * This method subtracts given numbers from Long.MAX_VALUE and returns a string representation of the result. * The resultant string is guaranteed to be of the same length that of Long.MAX_VALUE. If shorter, we add left padding * of 0s to the string. + * * @param num number to get the inverted long string for * @return String value of Long.MAX_VALUE - num */ @@ -46,6 +53,7 @@ public static String invertLong(long num) { /** * This method converts the given string into long and subtracts it from Long.MAX_VALUE + * * @param str long in string format to be inverted * @return long value of the invert result */ @@ -59,6 +67,7 @@ public static long invertLong(String str) { /** * Extracts the segment name from the provided segment file name + * * @param filename Segment file name to parse * @return Name of the segment that the segment file belongs to */ @@ -79,10 +88,9 @@ public static String getSegmentName(String filename) { } /** - * * @param mdFiles List of segment/translog metadata files - * @param fn Function to extract PrimaryTerm_Generation and Node Id from metadata file name . - * fn returns null if node id is not part of the file name + * @param fn Function to extract PrimaryTerm_Generation and Node Id from the metadata file name. + * fn returns null if node id is not part of the file name */ public static void verifyNoMultipleWriters(List<String> mdFiles, Function<String, Tuple<String, String>> fn) { Map<String, String> nodesByPrimaryTermAndGen = new HashMap<>(); @@ -116,4 +124,26 @@ static String longToUrlBase64(long value) { String base64Str = Base64.getUrlEncoder().encodeToString(hashBytes); return base64Str.substring(0, base64Str.length() - 1); } + + static long urlBase64ToLong(String base64Str) { + byte[] hashBytes = Base64.getUrlDecoder().decode(base64Str); + return ByteBuffer.wrap(hashBytes).getLong(); + } + + /** + * Converts an input hash which occupies 64 bits of memory into a composite encoded string. The string has 2 parts: + * 1. a base64 string and 2. a binary string. The first 6 bits are used for creating the base64 string. + * For the second part, the rest of the bits (of length {@code len}-6) are used as is in string form. 
+ */ + static String longToCompositeBase64AndBinaryEncoding(long value, int len) { + if (len < 7 || len > 64) { + throw new IllegalArgumentException("In longToCompositeBase64AndBinaryEncoding, len must be between 7 and 64 (both inclusive)"); + } + String binaryEncoding = String.format(Locale.ROOT, "%64s", Long.toBinaryString(value)).replace(' ', '0'); + String base64Part = binaryEncoding.substring(0, 6); + String binaryPart = binaryEncoding.substring(6, len); + int base64DecimalValue = Integer.valueOf(base64Part, 2); + assert base64DecimalValue >= 0 && base64DecimalValue < 64; + return URL_BASE64_CHARSET[base64DecimalValue] + binaryPart; + } } diff --git a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java index 599c54f293e89..39b1cc130d6a5 100644 --- a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java @@ -255,6 +255,12 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L private final Function isShardOnRemoteEnabledNode; + /** + * Flag to indicate whether {@link ReplicationTracker#createMissingPeerRecoveryRetentionLeases(ActionListener)} + * has been run successfully + */ + private boolean createdMissingRetentionLeases; + /** * Get all retention leases tracked on this shard. * @@ -955,7 +961,13 @@ private boolean invariant() { assert checkpoints.get(aId) != null : "aId [" + aId + "] is pending in sync but isn't tracked"; } - if (primaryMode && indexSettings.isSoftDeleteEnabled() && hasAllPeerRecoveryRetentionLeases) { + if (primaryMode && indexSettings.isSoftDeleteEnabled() && hasAllPeerRecoveryRetentionLeases + // Skip assertion if createMissingPeerRecoveryRetentionLeases has not yet run after activating primary context + // This is required since during an ongoing remote store migration, + // remote enabled primary taking over primary context from another remote enabled shard + // might not have retention leases for docrep shard copies + // (since all RetentionLease sync actions are blocked on remote shard copies) + && createdMissingRetentionLeases) { // all tracked shard copies have a corresponding peer-recovery retention lease for (final ShardRouting shardRouting : routingTable.assignedShards()) { final CheckpointState cps = checkpoints.get(shardRouting.allocationId().getId()); @@ -1843,19 +1855,34 @@ private synchronized void setHasAllPeerRecoveryRetentionLeases() { assert invariant(); } + private synchronized void setCreatedMissingRetentionLeases() { + createdMissingRetentionLeases = true; + assert invariant(); + } + public synchronized boolean hasAllPeerRecoveryRetentionLeases() { return hasAllPeerRecoveryRetentionLeases; } /** - * Create any required peer-recovery retention leases that do not currently exist because we just did a rolling upgrade from a version - * prior to {@code LegacyESVersion#V_7_4_0} that does not create peer-recovery retention leases. + * Create any required peer-recovery retention leases that do not currently exist. This can happen if either: + * - We just did a rolling upgrade from a version prior to {@code LegacyESVersion#V_7_4_0} that does not create peer-recovery retention leases. 
+ * - In a mixed mode cluster (during remote store migration), a remote enabled primary shard copy fails over to another remote enabled shard copy, + * but the replication group still has other shards in docrep nodes */ public synchronized void createMissingPeerRecoveryRetentionLeases(ActionListener<Void> listener) { - if (hasAllPeerRecoveryRetentionLeases == false) { + // Create missing RetentionLeases if the primary is on a remote enabled node + // and the replication group has at least one shard copy on a docrep enabled node. + // No-op if retention leases for the tracked shard copies already exist. + boolean createMissingRetentionLeasesDuringMigration = indexSettings.isAssignedOnRemoteNode() + && replicationGroup.getReplicationTargets() + .stream() + .anyMatch(shardRouting -> isShardOnRemoteEnabledNode.apply(shardRouting.currentNodeId()) == false); + if (hasAllPeerRecoveryRetentionLeases == false || createMissingRetentionLeasesDuringMigration) { final List<ShardRouting> shardRoutings = routingTable.assignedShards(); final GroupedActionListener<ReplicationResponse> groupedActionListener = new GroupedActionListener<>(ActionListener.wrap(vs -> { setHasAllPeerRecoveryRetentionLeases(); + setCreatedMissingRetentionLeases(); listener.onResponse(null); }, listener::onFailure), shardRoutings.size()); for (ShardRouting shardRouting : shardRoutings) { diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index 7e2ea5a77cbfa..df473a94a863e 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -124,6 +124,7 @@ import org.opensearch.index.query.QueryRewriteContext; import org.opensearch.index.recovery.RecoveryStats; import org.opensearch.index.refresh.RefreshStats; +import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; import org.opensearch.index.remote.RemoteStoreEnums.PathType; import org.opensearch.index.remote.RemoteStoreStatsTrackerFactory; import org.opensearch.index.search.stats.SearchStats; @@ -307,17 +308,30 @@ public class IndicesService extends AbstractLifecycleComponent ); /** - * This setting is used to set the remote store blob store path prefix strategy. This setting is effective only for + * This setting is used to set the remote store blob store path type strategy. This setting is effective only for * remote store enabled cluster. */ - public static final Setting<PathType> CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING = new Setting<>( - "cluster.remote_store.index.path.prefix.type", + public static final Setting<PathType> CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING = new Setting<>( + "cluster.remote_store.index.path.type", PathType.FIXED.toString(), PathType::parseString, Property.NodeScope, Property.Dynamic ); + /** + * This setting is used to set the remote store blob store path hash algorithm strategy. This setting is effective only for + * remote store enabled cluster. This setting comes into effect if the {@link #CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING} + * is either {@code HASHED_PREFIX} or {@code HASHED_INFIX}. + */ + public static final Setting<PathHashAlgorithm> CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING = new Setting<>( + "cluster.remote_store.index.path.hash_algorithm", + PathHashAlgorithm.FNV_1A_COMPOSITE_1.toString(), + PathHashAlgorithm::parseString, + Property.NodeScope, + Property.Dynamic + ); +
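As a quick illustration of how the resolver combines these two dynamic settings, here is a minimal self-contained model (the enums and helper below are hypothetical stand-ins for the real RemoteStoreEnums types, not the production code):

```java
// Hypothetical stand-ins for RemoteStoreEnums.PathType / PathHashAlgorithm.
enum PathType { FIXED, HASHED_PREFIX, HASHED_INFIX }
enum PathHashAlgorithm { FNV_1A_BASE64, FNV_1A_COMPOSITE_1 }

public class PathStrategySketch {
    // Mirrors RemoteStorePathStrategyResolver#get(): the configured hash
    // algorithm only applies to hashed path types; FIXED paths carry none.
    static PathHashAlgorithm resolveHashAlgorithm(PathType type, PathHashAlgorithm configured) {
        return type == PathType.FIXED ? null : configured;
    }

    public static void main(String[] args) {
        System.out.println(resolveHashAlgorithm(PathType.FIXED, PathHashAlgorithm.FNV_1A_BASE64));
        // null
        System.out.println(resolveHashAlgorithm(PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A_COMPOSITE_1));
        // FNV_1A_COMPOSITE_1
    }
}
```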
/** * The node's settings. */ diff --git a/server/src/main/java/org/opensearch/search/DefaultSearchContext.java b/server/src/main/java/org/opensearch/search/DefaultSearchContext.java index c76ea71c0a094..cd8714f6b556a 100644 --- a/server/src/main/java/org/opensearch/search/DefaultSearchContext.java +++ b/server/src/main/java/org/opensearch/search/DefaultSearchContext.java @@ -107,6 +107,7 @@ import java.util.function.LongSupplier; import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.opensearch.search.SearchService.MAX_AGGREGATION_REWRITE_FILTERS; /** * The main search context used during search phase @@ -187,6 +188,7 @@ final class DefaultSearchContext extends SearchContext { private final Function<SearchSourceBuilder, InternalAggregation.ReduceContextBuilder> requestToAggReduceContextBuilder; private final boolean concurrentSearchSettingsEnabled; private final SetOnce<Boolean> requestShouldUseConcurrentSearch = new SetOnce<>(); + private final int maxAggRewriteFilters; DefaultSearchContext( ReaderContext readerContext, @@ -240,6 +242,8 @@ final class DefaultSearchContext extends SearchContext { queryBoost = request.indexBoost(); this.lowLevelCancellation = lowLevelCancellation; this.requestToAggReduceContextBuilder = requestToAggReduceContextBuilder; + + this.maxAggRewriteFilters = evaluateFilterRewriteSetting(); } @Override @@ -994,4 +998,16 @@ public boolean shouldUseTimeSeriesDescSortOptimization() { && sort.isSortOnTimeSeriesField() && sort.sort.getSort()[0].getReverse() == false; } + + @Override + public int maxAggRewriteFilters() { + return maxAggRewriteFilters; + } + + private int evaluateFilterRewriteSetting() { + if (clusterService != null) { + return clusterService.getClusterSettings().get(MAX_AGGREGATION_REWRITE_FILTERS); + } + return 0; + } } diff --git a/server/src/main/java/org/opensearch/search/SearchService.java b/server/src/main/java/org/opensearch/search/SearchService.java index f2328bea65eb1..6b3620e65a271 100644 --- a/server/src/main/java/org/opensearch/search/SearchService.java +++ b/server/src/main/java/org/opensearch/search/SearchService.java @@ -272,6 +272,15 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv Property.NodeScope ); + // A value of 0 disables the rewrite filters optimization in aggregations + public static final Setting<Integer> MAX_AGGREGATION_REWRITE_FILTERS = Setting.intSetting( + "search.max_aggregation_rewrite_filters", + 72, + 0, + Property.Dynamic, + Property.NodeScope + ); + public static final int DEFAULT_SIZE = 10; public static final int DEFAULT_FROM = 0; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/FastFilterRewriteHelper.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/FastFilterRewriteHelper.java index e587b7f169e5f..dde748bf0dc07 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/FastFilterRewriteHelper.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/FastFilterRewriteHelper.java @@ -63,7 +63,6 @@ private FastFilterRewriteHelper() {} private static final Logger logger = LogManager.getLogger(FastFilterRewriteHelper.class); - private static final int MAX_NUM_FILTER_BUCKETS = 1024; private static final Map<Class<?>, Function<Query, Query>> queryWrappers; // Initialize the wrapper map for unwrapping the query @@ -186,8 +185,9 @@ private static Weight[] createFilterForAggregations( int bucketCount = 0; while (roundedLow <= fieldType.convertNanosToMillis(high)) { bucketCount++; - if (bucketCount > MAX_NUM_FILTER_BUCKETS) { - logger.debug("Max number of filters reached [{}], 
skip the fast filter optimization", MAX_NUM_FILTER_BUCKETS); + int maxNumFilterBuckets = context.maxAggRewriteFilters(); + if (bucketCount > maxNumFilterBuckets) { + logger.debug("Max number of filters reached [{}], skip the fast filter optimization", maxNumFilterBuckets); return null; } // Below rounding is needed as the interval could return in @@ -254,6 +254,8 @@ public void setAggregationType(AggregationType aggregationType) { } public boolean isRewriteable(final Object parent, final int subAggLength) { + if (context.maxAggRewriteFilters() == 0) return false; + boolean rewriteable = aggregationType.isRewriteable(parent, subAggLength); logger.debug("Fast filter rewriteable: {} for shard {}", rewriteable, context.indexShard().shardId()); this.rewriteable = rewriteable; diff --git a/server/src/main/java/org/opensearch/search/internal/SearchContext.java b/server/src/main/java/org/opensearch/search/internal/SearchContext.java index 8f6001dc06755..07f3616bbc138 100644 --- a/server/src/main/java/org/opensearch/search/internal/SearchContext.java +++ b/server/src/main/java/org/opensearch/search/internal/SearchContext.java @@ -522,4 +522,8 @@ public String toString() { public abstract int getTargetMaxSliceCount(); public abstract boolean shouldUseTimeSeriesDescSortOptimization(); + + public int maxAggRewriteFilters() { + return 0; + } } diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java index d3086de6ec89e..1a9321a755fef 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java @@ -1711,7 +1711,7 @@ public void testRemoteCustomData() { validateRemoteCustomData( indexMetadata.getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY), PathHashAlgorithm.NAME, - PathHashAlgorithm.FNV_1A.name() + PathHashAlgorithm.FNV_1A_COMPOSITE_1.name() ); } @@ -1720,7 +1720,7 @@ private IndexMetadata testRemoteCustomData(boolean remoteStoreEnabled, PathType if (remoteStoreEnabled) { settingsBuilder.put(NODE_ATTRIBUTES.getKey() + REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, "test"); } - settingsBuilder.put(IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), pathType.toString()); + settingsBuilder.put(IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), pathType.toString()); Settings settings = settingsBuilder.build(); ClusterService clusterService = mock(ClusterService.class); diff --git a/server/src/test/java/org/opensearch/common/time/DateUtilsTests.java b/server/src/test/java/org/opensearch/common/time/DateUtilsTests.java index d9662d1de9e0c..98a79f3ca38dc 100644 --- a/server/src/test/java/org/opensearch/common/time/DateUtilsTests.java +++ b/server/src/test/java/org/opensearch/common/time/DateUtilsTests.java @@ -58,7 +58,7 @@ import static org.hamcrest.Matchers.is; public class DateUtilsTests extends OpenSearchTestCase { - private static final Set IGNORE = new HashSet<>(Arrays.asList("America/Ciudad_Juarez")); + private static final Set IGNORE = new HashSet<>(Arrays.asList("Antarctica/Vostok")); public void testTimezoneIds() { assertNull(DateUtils.dateTimeZoneToZoneId(null)); diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStoreEnumsTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStoreEnumsTests.java index fe5635063f783..575b397382f24 100644 --- 
a/server/src/test/java/org/opensearch/index/remote/RemoteStoreEnumsTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStoreEnumsTests.java @@ -25,7 +25,8 @@ import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.LOCK_FILES; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA; -import static org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm.FNV_1A; +import static org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm.FNV_1A_BASE64; +import static org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm.FNV_1A_COMPOSITE_1; import static org.opensearch.index.remote.RemoteStoreEnums.PathType.FIXED; import static org.opensearch.index.remote.RemoteStoreEnums.PathType.HASHED_INFIX; import static org.opensearch.index.remote.RemoteStoreEnums.PathType.HASHED_PREFIX; @@ -161,10 +162,10 @@ public void testGeneratePathForHashedPrefixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - BlobPath result = HASHED_PREFIX.path(pathInput, FNV_1A); + BlobPath result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64); assertTrue( result.buildAsString() - .startsWith(String.join(SEPARATOR, FNV_1A.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) + .startsWith(String.join(SEPARATOR, FNV_1A_BASE64.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) ); // assert with exact value for known base path @@ -178,7 +179,7 @@ public void testGeneratePathForHashedPrefixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_PREFIX.path(pathInput, FNV_1A); + result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64); assertEquals("DgSI70IciXs/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/translog/data/", result.buildAsString()); // Translog Metadata @@ -190,10 +191,10 @@ public void testGeneratePathForHashedPrefixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_PREFIX.path(pathInput, FNV_1A); + result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64); assertTrue( result.buildAsString() - .startsWith(String.join(SEPARATOR, FNV_1A.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) + .startsWith(String.join(SEPARATOR, FNV_1A_BASE64.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) ); // assert with exact value for known base path @@ -204,7 +205,7 @@ public void testGeneratePathForHashedPrefixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_PREFIX.path(pathInput, FNV_1A); + result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64); assertEquals("oKU5SjILiy4/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/translog/metadata/", result.buildAsString()); // Translog Lock files - This is a negative case where the assertion will trip. 
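The exact-value assertions in this test follow the HASHED_PREFIX layout, where the hash is prepended to the fixed base path. A small sketch (the helper is hypothetical, not the production path builder) that reproduces one expected value from the assertions above:

```java
public class HashedPrefixLayoutSketch {
    // HASHED_PREFIX layout exercised by the assertions:
    //   <hash>/<base path>/<index uuid>/<shard id>/<data category>/<data type>/
    static String hashedPrefixPath(String hash, String basePath, String indexUUID,
                                   String shardId, String category, String type) {
        return String.join("/", hash, basePath, indexUUID, shardId, category, type) + "/";
    }

    public static void main(String[] args) {
        // Reproduces the fixed expected value from the translog/data case above.
        System.out.println(hashedPrefixPath(
            "DgSI70IciXs", "xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij",
            "k2ijhe877d7yuhx7", "10", "translog", "data"));
        // DgSI70IciXs/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/translog/data/
    }
}
```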
@@ -238,10 +239,10 @@ public void testGeneratePathForHashedPrefixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_PREFIX.path(pathInput, FNV_1A); + result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64); assertTrue( result.buildAsString() - .startsWith(String.join(SEPARATOR, FNV_1A.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) + .startsWith(String.join(SEPARATOR, FNV_1A_BASE64.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) ); // assert with exact value for known base path @@ -252,7 +253,7 @@ public void testGeneratePathForHashedPrefixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_PREFIX.path(pathInput, FNV_1A); + result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64); assertEquals("AUBRfCIuWdk/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/segments/data/", result.buildAsString()); // Segment Metadata @@ -264,10 +265,10 @@ public void testGeneratePathForHashedPrefixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_PREFIX.path(pathInput, FNV_1A); + result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64); assertTrue( result.buildAsString() - .startsWith(String.join(SEPARATOR, FNV_1A.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) + .startsWith(String.join(SEPARATOR, FNV_1A_BASE64.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) ); // assert with exact value for known base path @@ -278,7 +279,7 @@ public void testGeneratePathForHashedPrefixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_PREFIX.path(pathInput, FNV_1A); + result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64); assertEquals("erwR-G735Uw/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/segments/metadata/", result.buildAsString()); // Segment Lockfiles @@ -290,10 +291,10 @@ public void testGeneratePathForHashedPrefixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_PREFIX.path(pathInput, FNV_1A); + result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64); assertTrue( result.buildAsString() - .startsWith(String.join(SEPARATOR, FNV_1A.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) + .startsWith(String.join(SEPARATOR, FNV_1A_BASE64.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) ); // assert with exact value for known base path @@ -304,10 +305,197 @@ public void testGeneratePathForHashedPrefixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_PREFIX.path(pathInput, FNV_1A); + result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64); assertEquals("KeYDIk0mJXI/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/segments/lock_files/", result.buildAsString()); } + public void testGeneratePathForHashedPrefixTypeAndFNVCompositeHashAlgorithm() { + BlobPath blobPath = new BlobPath(); + List pathList = getPathList(); + for (String path : pathList) { + blobPath = blobPath.add(path); + } + + String indexUUID = randomAlphaOfLength(10); + String shardId = String.valueOf(randomInt(100)); + DataCategory dataCategory = TRANSLOG; + DataType dataType = DATA; + + String basePath = getPath(pathList) + indexUUID + SEPARATOR + shardId; + // Translog Data + PathInput pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + BlobPath result = HASHED_PREFIX.path(pathInput, 
FNV_1A_COMPOSITE_1); + assertTrue( + result.buildAsString() + .startsWith( + String.join(SEPARATOR, FNV_1A_COMPOSITE_1.hash(pathInput), basePath, dataCategory.getName(), dataType.getName()) + ) + ); + + // assert with exact value for known base path + BlobPath fixedBlobPath = BlobPath.cleanPath().add("xjsdhj").add("ddjsha").add("yudy7sd").add("32hdhua7").add("89jdij"); + String fixedIndexUUID = "k2ijhe877d7yuhx7"; + String fixedShardId = "10"; + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); + assertEquals("D10000001001000/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/translog/data/", result.buildAsString()); + + // Translog Metadata + dataType = METADATA; + pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); + assertTrue( + result.buildAsString() + .startsWith( + String.join(SEPARATOR, FNV_1A_COMPOSITE_1.hash(pathInput), basePath, dataCategory.getName(), dataType.getName()) + ) + ); + + // assert with exact value for known base path + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); + assertEquals( + "o00101001010011/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/translog/metadata/", + result.buildAsString() + ); + + // Translog Lock files - This is a negative case where the assertion will trip. 
+ dataType = LOCK_FILES; + PathInput finalPathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + assertThrows(AssertionError.class, () -> HASHED_PREFIX.path(finalPathInput, null)); + + // assert with exact value for known base path + PathInput finalFixedPathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + assertThrows(AssertionError.class, () -> HASHED_PREFIX.path(finalFixedPathInput, null)); + + // Segment Data + dataCategory = SEGMENTS; + dataType = DATA; + pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); + assertTrue( + result.buildAsString() + .startsWith( + String.join(SEPARATOR, FNV_1A_COMPOSITE_1.hash(pathInput), basePath, dataCategory.getName(), dataType.getName()) + ) + ); + + // assert with exact value for known base path + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); + assertEquals("A01010000000101/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/segments/data/", result.buildAsString()); + + // Segment Metadata + dataType = METADATA; + pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); + assertTrue( + result.buildAsString() + .startsWith( + String.join(SEPARATOR, FNV_1A_COMPOSITE_1.hash(pathInput), basePath, dataCategory.getName(), dataType.getName()) + ) + ); + + // assert with exact value for known base path + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); + assertEquals( + "e10101111000001/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/segments/metadata/", + result.buildAsString() + ); + + // Segment Lockfiles + dataType = LOCK_FILES; + pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); + assertTrue( + result.buildAsString() + .startsWith( + String.join(SEPARATOR, FNV_1A_COMPOSITE_1.hash(pathInput), basePath, dataCategory.getName(), dataType.getName()) + ) + ); + + // assert with exact value for known base path + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); + assertEquals( + "K01111001100000/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/segments/lock_files/", + result.buildAsString() + ); + } + public void testGeneratePathForHashedInfixType() { BlobPath blobPath = new BlobPath(); List<String> pathList = getPathList(); @@ -330,7 +518,7 @@ public void testGeneratePathForHashedInfixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - BlobPath result = 
HASHED_INFIX.path(pathInput, FNV_1A); + BlobPath result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64); String expected = derivePath(basePath, pathInput); String actual = result.buildAsString(); assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); @@ -346,7 +534,7 @@ public void testGeneratePathForHashedInfixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_INFIX.path(pathInput, FNV_1A); + result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64); expected = "xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/DgSI70IciXs/k2ijhe877d7yuhx7/10/translog/data/"; actual = result.buildAsString(); assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); @@ -361,7 +549,7 @@ public void testGeneratePathForHashedInfixType() { .dataType(dataType) .build(); - result = HASHED_INFIX.path(pathInput, FNV_1A); + result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64); expected = derivePath(basePath, pathInput); actual = result.buildAsString(); assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); @@ -374,7 +562,7 @@ public void testGeneratePathForHashedInfixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_INFIX.path(pathInput, FNV_1A); + result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64); expected = "xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/oKU5SjILiy4/k2ijhe877d7yuhx7/10/translog/metadata/"; actual = result.buildAsString(); assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); @@ -410,7 +598,7 @@ public void testGeneratePathForHashedInfixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_INFIX.path(pathInput, FNV_1A); + result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64); expected = derivePath(basePath, pathInput); actual = result.buildAsString(); assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); @@ -423,7 +611,7 @@ public void testGeneratePathForHashedInfixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_INFIX.path(pathInput, FNV_1A); + result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64); expected = "xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/AUBRfCIuWdk/k2ijhe877d7yuhx7/10/segments/data/"; actual = result.buildAsString(); assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); @@ -437,7 +625,7 @@ public void testGeneratePathForHashedInfixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_INFIX.path(pathInput, FNV_1A); + result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64); expected = derivePath(basePath, pathInput); actual = result.buildAsString(); assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); @@ -450,7 +638,7 @@ public void testGeneratePathForHashedInfixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_INFIX.path(pathInput, FNV_1A); + result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64); expected = "xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/erwR-G735Uw/k2ijhe877d7yuhx7/10/segments/metadata/"; actual = result.buildAsString(); assertTrue(new ParameterizedMessage("expected={} 
actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); @@ -464,7 +652,7 @@ public void testGeneratePathForHashedInfixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_INFIX.path(pathInput, FNV_1A); + result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64); expected = derivePath(basePath, pathInput); actual = result.buildAsString(); assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); @@ -477,7 +665,7 @@ public void testGeneratePathForHashedInfixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_INFIX.path(pathInput, FNV_1A); + result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64); expected = "xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/KeYDIk0mJXI/k2ijhe877d7yuhx7/10/segments/lock_files/"; actual = result.buildAsString(); assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); @@ -487,7 +675,7 @@ private String derivePath(String basePath, PathInput pathInput) { return "".equals(basePath) ? String.join( SEPARATOR, - FNV_1A.hash(pathInput), + FNV_1A_BASE64.hash(pathInput), pathInput.indexUUID(), pathInput.shardId(), pathInput.dataCategory().getName(), @@ -496,7 +684,7 @@ private String derivePath(String basePath, PathInput pathInput) { : String.join( SEPARATOR, basePath, - FNV_1A.hash(pathInput), + FNV_1A_BASE64.hash(pathInput), pathInput.indexUUID(), pathInput.shardId(), pathInput.dataCategory().getName(), diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyResolverTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyResolverTests.java index 9d4b41f5c395f..4aa0d11601a05 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyResolverTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyResolverTests.java @@ -11,17 +11,17 @@ import org.opensearch.Version; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; import org.opensearch.index.remote.RemoteStoreEnums.PathType; import org.opensearch.test.OpenSearchTestCase; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING; +import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING; +import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING; public class RemoteStorePathStrategyResolverTests extends OpenSearchTestCase { public void testGetMinVersionOlder() { - Settings settings = Settings.builder() - .put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), randomFrom(PathType.values())) - .build(); + Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), randomFrom(PathType.values())).build(); ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.V_2_13_0); assertEquals(PathType.FIXED, resolver.get().getType()); @@ -30,7 +30,7 @@ public void testGetMinVersionOlder() { public void testGetMinVersionNewer() { PathType pathType = randomFrom(PathType.values()); - Settings settings = 
Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), pathType).build(); + Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), pathType).build(); ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT); assertEquals(pathType, resolver.get().getType()); @@ -39,7 +39,100 @@ public void testGetMinVersionNewer() { } else { assertNull(resolver.get().getHashAlgorithm()); } + } + + public void testGetStrategy() { + // FIXED type + Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.FIXED).build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT); + assertEquals(PathType.FIXED, resolver.get().getType()); + + // FIXED type with hash algorithm + settings = Settings.builder() + .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.FIXED) + .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), randomFrom(PathHashAlgorithm.values())) + .build(); + clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT); + assertEquals(PathType.FIXED, resolver.get().getType()); + + // HASHED_PREFIX type with FNV_1A_COMPOSITE + settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX).build(); + clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT); + assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_COMPOSITE_1, resolver.get().getHashAlgorithm()); + + // HASHED_PREFIX type with FNV_1A_COMPOSITE + settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX).build(); + clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT); + assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_COMPOSITE_1, resolver.get().getHashAlgorithm()); + + // HASHED_PREFIX type with FNV_1A_BASE64 + settings = Settings.builder() + .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX) + .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), PathHashAlgorithm.FNV_1A_BASE64) + .build(); + clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT); + assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_BASE64, resolver.get().getHashAlgorithm()); + // HASHED_PREFIX type with FNV_1A_BASE64 + settings = Settings.builder() + .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX) + .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), PathHashAlgorithm.FNV_1A_BASE64) + .build(); + clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + 
resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT); + assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_BASE64, resolver.get().getHashAlgorithm()); } + public void testGetStrategyWithDynamicUpdate() { + + // Default value + Settings settings = Settings.builder().build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT); + assertEquals(PathType.FIXED, resolver.get().getType()); + assertNull(resolver.get().getHashAlgorithm()); + + // Set HASHED_PREFIX with default hash algorithm + clusterSettings.applySettings( + Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX).build() + ); + assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_COMPOSITE_1, resolver.get().getHashAlgorithm()); + + // Set HASHED_PREFIX with FNV_1A_BASE64 hash algorithm + clusterSettings.applySettings( + Settings.builder() + .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX) + .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), PathHashAlgorithm.FNV_1A_BASE64) + .build() + ); + assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_BASE64, resolver.get().getHashAlgorithm()); + + // Set HASHED_INFIX with default hash algorithm + clusterSettings.applySettings( + Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_INFIX).build() + ); + assertEquals(PathType.HASHED_INFIX, resolver.get().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_COMPOSITE_1, resolver.get().getHashAlgorithm()); + + // Set HASHED_INFIX with FNV_1A_BASE64 hash algorithm + clusterSettings.applySettings( + Settings.builder() + .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_INFIX) + .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), PathHashAlgorithm.FNV_1A_BASE64) + .build() + ); + assertEquals(PathType.HASHED_INFIX, resolver.get().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_BASE64, resolver.get().getHashAlgorithm()); + } } diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java index 34074861f2764..4d3e633848975 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java @@ -14,13 +14,19 @@ import org.opensearch.index.translog.transfer.TranslogTransferMetadata; import org.opensearch.test.OpenSearchTestCase; +import java.math.BigInteger; import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.stream.Collectors; +import static org.opensearch.index.remote.RemoteStoreUtils.URL_BASE64_CHARSET; +import static org.opensearch.index.remote.RemoteStoreUtils.longToCompositeBase64AndBinaryEncoding; import static org.opensearch.index.remote.RemoteStoreUtils.longToUrlBase64; +import static org.opensearch.index.remote.RemoteStoreUtils.urlBase64ToLong; import static org.opensearch.index.remote.RemoteStoreUtils.verifyNoMultipleWriters; import static 
org.opensearch.index.store.RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX; import static org.opensearch.index.store.RemoteSegmentStoreDirectory.MetadataFilenameUtils.SEPARATOR; @@ -28,6 +34,16 @@ public class RemoteStoreUtilsTests extends OpenSearchTestCase { + private static Map BASE64_CHARSET_IDX_MAP; + + static { + Map charToIndexMap = new HashMap<>(); + for (int i = 0; i < URL_BASE64_CHARSET.length; i++) { + charToIndexMap.put(URL_BASE64_CHARSET[i], i); + } + BASE64_CHARSET_IDX_MAP = Collections.unmodifiableMap(charToIndexMap); + } + private final String metadataFilename = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( 12, 23, @@ -205,8 +221,106 @@ public void testLongToBase64() { "6kv3yZNv9kY" ); for (Map.Entry entry : longToExpectedBase64String.entrySet()) { - assertEquals(entry.getValue(), longToUrlBase64(entry.getKey())); + String base64Str = longToUrlBase64(entry.getKey()); + assertEquals(entry.getValue(), base64Str); assertEquals(11, entry.getValue().length()); + assertEquals((long) entry.getKey(), urlBase64ToLong(base64Str)); + } + + int iters = randomInt(100); + for (int i = 0; i < iters; i++) { + long value = randomLong(); + String base64Str = longToUrlBase64(value); + assertEquals(value, urlBase64ToLong(base64Str)); } } + + public void testLongToCompositeUrlBase64AndBinaryEncodingUsing20Bits() { + Map longToExpectedBase64String = Map.of( + -5537941589147079860L, + "s11001001010100", + -5878421770170594047L, + "r10011010111010", + -5147010836697060622L, + "u00100100100010", + 937096430362711837L, + "D01000000010011", + 8422273604115462710L, + "d00111000011110", + -2528761975013221124L, + "300111010000000", + -5512387536280560513L, + "s11100000000001", + -5749656451579835857L, + "s00001101010001", + 5569654857969679538L, + "T01010010110110", + -1563884000447039930L, + "610010010111111" + ); + for (Map.Entry entry : longToExpectedBase64String.entrySet()) { + String base64Str = RemoteStoreUtils.longToCompositeBase64AndBinaryEncoding(entry.getKey(), 20); + assertEquals(entry.getValue(), base64Str); + assertEquals(15, entry.getValue().length()); + assertEquals(longToUrlBase64(entry.getKey()).charAt(0), base64Str.charAt(0)); + } + + int iters = randomInt(1000); + for (int i = 0; i < iters; i++) { + long value = randomLong(); + assertEquals(RemoteStoreUtils.longToCompositeBase64AndBinaryEncoding(value, 20).charAt(0), longToUrlBase64(value).charAt(0)); + } + } + + public void testLongToCompositeUrlBase64AndBinaryEncoding() { + Map longToExpectedBase64String = Map.of( + -5537941589147079860L, + "s1100100101010001110111011101001000000001101010101101001100", + -5878421770170594047L, + "r1001101011101001101000101110010101000011110000110100000001", + -5147010836697060622L, + "u0010010010001001001110100111111111100101011110101011110010", + 937096430362711837L, + "D0100000001001111000011110100001100000011100101011100011101", + 8422273604115462710L, + "d0011100001111011010011100001000110011100110111101000110110", + -2528761975013221124L, + "30011101000000010000110000110110101110100100101110011111100", + -5512387536280560513L, + "s1110000000000100001011110111011011101101001101110001111111", + -5749656451579835857L, + "s0000110101000111011110101110010111000011010000101000101111", + 5569654857969679538L, + "T0101001011011000111001010110000010110011111011110010110010", + -1563884000447039930L, + "61001001011111101111100100110010011011011111111011001000110" + ); + for (Map.Entry entry : longToExpectedBase64String.entrySet()) { + Long hashValue = 
entry.getKey(); + String expectedCompositeEncoding = entry.getValue(); + String actualCompositeEncoding = longToCompositeBase64AndBinaryEncoding(hashValue, 64); + assertEquals(expectedCompositeEncoding, actualCompositeEncoding); + assertEquals(59, expectedCompositeEncoding.length()); + assertEquals(longToUrlBase64(entry.getKey()).charAt(0), actualCompositeEncoding.charAt(0)); + assertEquals(RemoteStoreUtils.longToCompositeBase64AndBinaryEncoding(hashValue, 20), actualCompositeEncoding.substring(0, 15)); + + Long computedHashValue = compositeUrlBase64BinaryEncodingToLong(actualCompositeEncoding); + assertEquals(hashValue, computedHashValue); + } + + int iters = randomInt(1000); + for (int i = 0; i < iters; i++) { + long value = randomLong(); + String compositeEncoding = longToCompositeBase64AndBinaryEncoding(value, 64); + assertEquals(value, compositeUrlBase64BinaryEncodingToLong(compositeEncoding)); + } + } + + static long compositeUrlBase64BinaryEncodingToLong(String encodedValue) { + char ch = encodedValue.charAt(0); + int base64BitsIntValue = BASE64_CHARSET_IDX_MAP.get(ch); + String base64PartBinary = Integer.toBinaryString(base64BitsIntValue); + String binaryString = base64PartBinary + encodedValue.substring(1); + return new BigInteger(binaryString, 2).longValue(); + } } diff --git a/server/src/test/java/org/opensearch/index/snapshots/blobstore/RemoteStoreShardShallowCopySnapshotTests.java b/server/src/test/java/org/opensearch/index/snapshots/blobstore/RemoteStoreShardShallowCopySnapshotTests.java index e3259a3097278..e81eef67d6704 100644 --- a/server/src/test/java/org/opensearch/index/snapshots/blobstore/RemoteStoreShardShallowCopySnapshotTests.java +++ b/server/src/test/java/org/opensearch/index/snapshots/blobstore/RemoteStoreShardShallowCopySnapshotTests.java @@ -104,7 +104,7 @@ public void testToXContent() throws IOException { + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"],\"path_type\":0}"; assert Objects.equals(actual, expectedXContent) : "xContent is " + actual; - // Case 3 - with just hashed prefix type and hash algorithm + // Case 3 - with just hashed prefix type and FNV_1A_BASE64 hash algorithm shardShallowCopySnapshot = new RemoteStoreShardShallowCopySnapshot( snapshot, indexVersion, @@ -119,7 +119,7 @@ public void testToXContent() throws IOException { repositoryBasePath, fileNames, PathType.HASHED_PREFIX, - PathHashAlgorithm.FNV_1A + PathHashAlgorithm.FNV_1A_BASE64 ); try (XContentBuilder builder = MediaTypeRegistry.JSON.contentBuilder()) { builder.startObject(); @@ -134,6 +134,99 @@ public void testToXContent() throws IOException { + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"],\"path_type\":1" + ",\"path_hash_algorithm\":0}"; assert Objects.equals(actual, expectedXContent) : "xContent is " + actual; + + // Case 4 - with just hashed prefix type and FNV_1A_COMPOSITE hash algorithm + shardShallowCopySnapshot = new RemoteStoreShardShallowCopySnapshot( + snapshot, + indexVersion, + primaryTerm, + commitGeneration, + startTime, + time, + totalFileCount, + totalSize, + indexUUID, + remoteStoreRepository, + repositoryBasePath, + fileNames, + PathType.HASHED_PREFIX, + PathHashAlgorithm.FNV_1A_COMPOSITE_1 + ); + try (XContentBuilder builder = MediaTypeRegistry.JSON.contentBuilder()) { + builder.startObject(); + shardShallowCopySnapshot.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + actual = builder.toString(); + } + + expectedXContent = 
"{\"version\":\"2\",\"name\":\"test-snapshot\",\"index_version\":1,\"start_time\":123,\"time\":123," + + "\"number_of_files\":5,\"total_size\":5,\"index_uuid\":\"syzhajds-ashdlfj\",\"remote_store_repository\":" + + "\"test-rs-repository\",\"commit_generation\":5,\"primary_term\":3,\"remote_store_repository_base_path\":" + + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"],\"path_type\":1" + + ",\"path_hash_algorithm\":1}"; + assert Objects.equals(actual, expectedXContent) : "xContent is " + actual; + + // Case 5 - with just hashed infix type and FNV_1A_BASE64 hash algorithm + shardShallowCopySnapshot = new RemoteStoreShardShallowCopySnapshot( + snapshot, + indexVersion, + primaryTerm, + commitGeneration, + startTime, + time, + totalFileCount, + totalSize, + indexUUID, + remoteStoreRepository, + repositoryBasePath, + fileNames, + PathType.HASHED_INFIX, + PathHashAlgorithm.FNV_1A_BASE64 + ); + try (XContentBuilder builder = MediaTypeRegistry.JSON.contentBuilder()) { + builder.startObject(); + shardShallowCopySnapshot.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + actual = builder.toString(); + } + + expectedXContent = "{\"version\":\"2\",\"name\":\"test-snapshot\",\"index_version\":1,\"start_time\":123,\"time\":123," + + "\"number_of_files\":5,\"total_size\":5,\"index_uuid\":\"syzhajds-ashdlfj\",\"remote_store_repository\":" + + "\"test-rs-repository\",\"commit_generation\":5,\"primary_term\":3,\"remote_store_repository_base_path\":" + + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"],\"path_type\":2" + + ",\"path_hash_algorithm\":0}"; + assert Objects.equals(actual, expectedXContent) : "xContent is " + actual; + + // Case 6 - with just hashed infix type and FNV_1A_COMPOSITE hash algorithm + shardShallowCopySnapshot = new RemoteStoreShardShallowCopySnapshot( + snapshot, + indexVersion, + primaryTerm, + commitGeneration, + startTime, + time, + totalFileCount, + totalSize, + indexUUID, + remoteStoreRepository, + repositoryBasePath, + fileNames, + PathType.HASHED_INFIX, + PathHashAlgorithm.FNV_1A_COMPOSITE_1 + ); + try (XContentBuilder builder = MediaTypeRegistry.JSON.contentBuilder()) { + builder.startObject(); + shardShallowCopySnapshot.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + actual = builder.toString(); + } + + expectedXContent = "{\"version\":\"2\",\"name\":\"test-snapshot\",\"index_version\":1,\"start_time\":123,\"time\":123," + + "\"number_of_files\":5,\"total_size\":5,\"index_uuid\":\"syzhajds-ashdlfj\",\"remote_store_repository\":" + + "\"test-rs-repository\",\"commit_generation\":5,\"primary_term\":3,\"remote_store_repository_base_path\":" + + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"],\"path_type\":2" + + ",\"path_hash_algorithm\":1}"; + assert Objects.equals(actual, expectedXContent) : "xContent is " + actual; } public void testFromXContent() throws IOException { @@ -223,7 +316,88 @@ public void testFromXContent() throws IOException { repositoryBasePath, fileNames, PathType.HASHED_PREFIX, - PathHashAlgorithm.FNV_1A + PathHashAlgorithm.FNV_1A_BASE64 + ); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, xContent)) { + RemoteStoreShardShallowCopySnapshot actualShardShallowCopySnapshot = RemoteStoreShardShallowCopySnapshot.fromXContent(parser); + assert Objects.equals(expectedShardShallowCopySnapshot, actualShardShallowCopySnapshot); + } + + // with pathType=PathType.HASHED_PREFIX 
and pathHashAlgorithm=PathHashAlgorithm.FNV_1A_COMPOSITE_1 + xContent = "{\"version\":\"2\",\"name\":\"test-snapshot\",\"index_version\":1,\"start_time\":123,\"time\":123," + + "\"number_of_files\":5,\"total_size\":5,\"index_uuid\":\"syzhajds-ashdlfj\",\"remote_store_repository\":" + + "\"test-rs-repository\",\"commit_generation\":5,\"primary_term\":3,\"remote_store_repository_base_path\":" + + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"],\"path_type\":1,\"path_hash_algorithm\":1}"; + expectedShardShallowCopySnapshot = new RemoteStoreShardShallowCopySnapshot( + "2", + snapshot, + indexVersion, + primaryTerm, + commitGeneration, + startTime, + time, + totalFileCount, + totalSize, + indexUUID, + remoteStoreRepository, + repositoryBasePath, + fileNames, + PathType.HASHED_PREFIX, + PathHashAlgorithm.FNV_1A_COMPOSITE_1 + ); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, xContent)) { + RemoteStoreShardShallowCopySnapshot actualShardShallowCopySnapshot = RemoteStoreShardShallowCopySnapshot.fromXContent(parser); + assert Objects.equals(expectedShardShallowCopySnapshot, actualShardShallowCopySnapshot); + } + + // with pathType=PathType.HASHED_INFIX and pathHashAlgorithm=PathHashAlgorithm.FNV_1A_BASE64 + xContent = "{\"version\":\"2\",\"name\":\"test-snapshot\",\"index_version\":1,\"start_time\":123,\"time\":123," + + "\"number_of_files\":5,\"total_size\":5,\"index_uuid\":\"syzhajds-ashdlfj\",\"remote_store_repository\":" + + "\"test-rs-repository\",\"commit_generation\":5,\"primary_term\":3,\"remote_store_repository_base_path\":" + + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"],\"path_type\":2,\"path_hash_algorithm\":0}"; + expectedShardShallowCopySnapshot = new RemoteStoreShardShallowCopySnapshot( + "2", + snapshot, + indexVersion, + primaryTerm, + commitGeneration, + startTime, + time, + totalFileCount, + totalSize, + indexUUID, + remoteStoreRepository, + repositoryBasePath, + fileNames, + PathType.HASHED_INFIX, + PathHashAlgorithm.FNV_1A_BASE64 + ); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, xContent)) { + RemoteStoreShardShallowCopySnapshot actualShardShallowCopySnapshot = RemoteStoreShardShallowCopySnapshot.fromXContent(parser); + assert Objects.equals(expectedShardShallowCopySnapshot, actualShardShallowCopySnapshot); + } + + // with pathType=PathType.HASHED_INFIX and pathHashAlgorithm=PathHashAlgorithm.FNV_1A_COMPOSITE_1 + xContent = "{\"version\":\"2\",\"name\":\"test-snapshot\",\"index_version\":1,\"start_time\":123,\"time\":123," + + "\"number_of_files\":5,\"total_size\":5,\"index_uuid\":\"syzhajds-ashdlfj\",\"remote_store_repository\":" + + "\"test-rs-repository\",\"commit_generation\":5,\"primary_term\":3,\"remote_store_repository_base_path\":" + + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"],\"path_type\":2,\"path_hash_algorithm\":1}"; + expectedShardShallowCopySnapshot = new RemoteStoreShardShallowCopySnapshot( + "2", + snapshot, + indexVersion, + primaryTerm, + commitGeneration, + startTime, + time, + totalFileCount, + totalSize, + indexUUID, + remoteStoreRepository, + repositoryBasePath, + fileNames, + PathType.HASHED_INFIX, + PathHashAlgorithm.FNV_1A_COMPOSITE_1 ); try (XContentParser parser = createParser(JsonXContent.jsonXContent, xContent)) { RemoteStoreShardShallowCopySnapshot actualShardShallowCopySnapshot = RemoteStoreShardShallowCopySnapshot.fromXContent(parser); @@ -232,7 +406,7 @@ public void 
+        // with pathType=PathType.HASHED_INFIX and pathHashAlgorithm=PathHashAlgorithm.FNV_1A_COMPOSITE_1
+        xContent = "{\"version\":\"2\",\"name\":\"test-snapshot\",\"index_version\":1,\"start_time\":123,\"time\":123,"
+            + "\"number_of_files\":5,\"total_size\":5,\"index_uuid\":\"syzhajds-ashdlfj\",\"remote_store_repository\":"
+            + "\"test-rs-repository\",\"commit_generation\":5,\"primary_term\":3,\"remote_store_repository_base_path\":"
+            + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"],\"path_type\":2,\"path_hash_algorithm\":1}";
+        expectedShardShallowCopySnapshot = new RemoteStoreShardShallowCopySnapshot(
+            "2",
+            snapshot,
+            indexVersion,
+            primaryTerm,
+            commitGeneration,
+            startTime,
+            time,
+            totalFileCount,
+            totalSize,
+            indexUUID,
+            remoteStoreRepository,
+            repositoryBasePath,
+            fileNames,
+            PathType.HASHED_INFIX,
+            PathHashAlgorithm.FNV_1A_COMPOSITE_1
         );
         try (XContentParser parser = createParser(JsonXContent.jsonXContent, xContent)) {
             RemoteStoreShardShallowCopySnapshot actualShardShallowCopySnapshot = RemoteStoreShardShallowCopySnapshot.fromXContent(parser);
@@ -232,7 +406,7 @@ public void testFromXContent() throws IOException {
     }
 
     public void testFromXContentInvalid() throws IOException {
-        final int iters = 14;
+        final int iters = 18;
         for (int iter = 0; iter < iters; iter++) {
             String snapshot = "test-snapshot";
             long indexVersion = 1;
@@ -296,21 +470,47 @@ public void testFromXContentInvalid() throws IOException {
                     break;
                 case 10:
                     version = "1";
-                    pathHashAlgorithm = PathHashAlgorithm.FNV_1A;
-                    failure = "Invalid combination of pathType=null pathHashAlgorithm=FNV_1A for version=1";
+                    pathHashAlgorithm = PathHashAlgorithm.FNV_1A_BASE64;
+                    failure = "Invalid combination of pathType=null pathHashAlgorithm=FNV_1A_BASE64 for version=1";
                     break;
                 case 11:
                     version = "2";
                     pathType = PathType.FIXED;
-                    pathHashAlgorithm = PathHashAlgorithm.FNV_1A;
-                    failure = "Invalid combination of pathType=FIXED pathHashAlgorithm=FNV_1A for version=2";
+                    pathHashAlgorithm = PathHashAlgorithm.FNV_1A_BASE64;
+                    failure = "Invalid combination of pathType=FIXED pathHashAlgorithm=FNV_1A_BASE64 for version=2";
                     break;
                 case 12:
+                    version = "1";
+                    pathHashAlgorithm = PathHashAlgorithm.FNV_1A_COMPOSITE_1;
+                    failure = "Invalid combination of pathType=null pathHashAlgorithm=FNV_1A_COMPOSITE_1 for version=1";
+                    break;
+                case 13:
+                    version = "2";
+                    pathType = PathType.FIXED;
+                    pathHashAlgorithm = PathHashAlgorithm.FNV_1A_COMPOSITE_1;
+                    failure = "Invalid combination of pathType=FIXED pathHashAlgorithm=FNV_1A_COMPOSITE_1 for version=2";
+                    break;
+                case 14:
                     version = "2";
                     pathType = PathType.HASHED_PREFIX;
-                    pathHashAlgorithm = PathHashAlgorithm.FNV_1A;
+                    pathHashAlgorithm = PathHashAlgorithm.FNV_1A_BASE64;
                     break;
-                case 13:
+                case 15:
+                    version = "2";
+                    pathType = PathType.HASHED_PREFIX;
+                    pathHashAlgorithm = PathHashAlgorithm.FNV_1A_COMPOSITE_1;
+                    break;
+                case 16:
+                    version = "2";
+                    pathType = PathType.HASHED_INFIX;
+                    pathHashAlgorithm = PathHashAlgorithm.FNV_1A_BASE64;
+                    break;
+                case 17:
+                    version = "2";
+                    pathType = PathType.HASHED_INFIX;
+                    pathHashAlgorithm = PathHashAlgorithm.FNV_1A_COMPOSITE_1;
+                    break;
+                case 18:
                     break;
                 default:
                     fail("shouldn't be here");
diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java
index 44ddd2de9d007..b1e2028d761f0 100644
--- a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java
+++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java
@@ -706,7 +706,7 @@ public void testCleanupAsync() throws Exception {
         ShardId shardId = new ShardId(Index.UNKNOWN_INDEX_NAME, indexUUID, Integer.parseInt("0"));
         RemoteStorePathStrategy pathStrategy = randomFrom(
             new RemoteStorePathStrategy(PathType.FIXED),
-            new RemoteStorePathStrategy(PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A)
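+            // Previously fixed to HASHED_PREFIX with FNV_1A; now either hashed path type with any hash algorithm.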
+            new RemoteStorePathStrategy(randomFrom(PathType.HASHED_INFIX, PathType.HASHED_PREFIX), randomFrom(PathHashAlgorithm.values()))
         );
 
         RemoteSegmentStoreDirectory.remoteDirectoryCleanup(
diff --git a/settings.gradle b/settings.gradle
index ccc239ff17062..e7d3dc56b67b9 100644
--- a/settings.gradle
+++ b/settings.gradle
@@ -10,7 +10,7 @@
  */
 
 plugins {
-  id "com.gradle.enterprise" version "3.17"
+  id "com.gradle.enterprise" version "3.17.1"
 }
 
 ext.disableBuildCache = hasProperty('DISABLE_BUILD_CACHE') || System.getenv().containsKey('DISABLE_BUILD_CACHE')
diff --git a/test/framework/src/main/java/org/opensearch/indices/analysis/AnalysisFactoryTestCase.java b/test/framework/src/main/java/org/opensearch/indices/analysis/AnalysisFactoryTestCase.java
index c412ae8317f24..5231fe095f0f0 100644
--- a/test/framework/src/main/java/org/opensearch/indices/analysis/AnalysisFactoryTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/indices/analysis/AnalysisFactoryTestCase.java
@@ -222,6 +222,9 @@ public abstract class AnalysisFactoryTestCase extends OpenSearchTestCase {
         .put("persianstem", Void.class)
         // https://github.com/apache/lucene/pull/12169
         .put("word2vecsynonym", Void.class)
+        // https://github.com/apache/lucene/pull/12915
+        .put("japanesehiraganauppercase", Void.class)
+        .put("japanesekatakanauppercase", Void.class)
         .immutableMap();
 
     static final Map<String, Class<?>> KNOWN_CHARFILTERS = new MapBuilder<String, Class<?>>()
diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java
index c26c3f8d21380..c8d44efd8076a 100644
--- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java
@@ -211,7 +211,7 @@
 import static org.opensearch.index.IndexSettings.INDEX_DOC_ID_FUZZY_SET_FALSE_POSITIVE_PROBABILITY_SETTING;
 import static org.opensearch.index.IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING;
 import static org.opensearch.index.query.QueryBuilders.matchAllQuery;
-import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING;
+import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING;
 import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING;
 import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY;
 import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX;
@@ -2619,7 +2619,7 @@ private static Settings buildRemoteStoreNodeAttributes(
             settings.put(segmentRepoSettingsAttributeKeyPrefix + "compress", randomBoolean())
                 .put(segmentRepoSettingsAttributeKeyPrefix + "chunk_size", 200, ByteSizeUnit.BYTES);
         }
-        settings.put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), randomFrom(PathType.values()));
+        settings.put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), randomFrom(PathType.values()));
         return settings.build();
     }