diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 6281fa0af3e36..5476637b84e92 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -13,15 +13,9 @@ Resolves #[Issue number to be closed when this PR is merged] ### Check List -- [ ] New functionality includes testing. - - [ ] All tests pass -- [ ] New functionality has been documented. - - [ ] New functionality has javadoc added -- [ ] API changes companion pull request [created](https://github.com/opensearch-project/opensearch-api-specification/blob/main/DEVELOPER_GUIDE.md). -- [ ] Failing checks are inspected and point to the corresponding known issue(s) (See: [Troubleshooting Failing Builds](../blob/main/CONTRIBUTING.md#troubleshooting-failing-builds)) -- [ ] Commits are signed per the DCO using --signoff -- [ ] Commit changes are listed out in CHANGELOG.md file (See: [Changelog](../blob/main/CONTRIBUTING.md#changelog)) -- [ ] Public documentation issue/PR [created](https://github.com/opensearch-project/documentation-website/issues/new/choose) +- [ ] Functionality includes testing. +- [ ] API changes companion pull request [created](https://github.com/opensearch-project/opensearch-api-specification/blob/main/DEVELOPER_GUIDE.md), if applicable. +- [ ] Public documentation issue/PR [created](https://github.com/opensearch-project/documentation-website/issues/new/choose), if applicable. By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license. For more information on following Developer Certificate of Origin and signing off your commits, please check [here](https://github.com/opensearch-project/OpenSearch/blob/main/CONTRIBUTING.md#developer-certificate-of-origin). diff --git a/.github/workflows/assemble.yml b/.github/workflows/assemble.yml index 4bff5b7d60d1f..51ae075ffa2c9 100644 --- a/.github/workflows/assemble.yml +++ b/.github/workflows/assemble.yml @@ -17,10 +17,23 @@ jobs: java-version: ${{ matrix.java }} distribution: temurin - name: Setup docker (missing on MacOS) + id: setup_docker if: runner.os == 'macos' uses: douglascamata/setup-docker-macos-action@main + continue-on-error: true with: upgrade-qemu: true + colima: v0.6.8 - name: Run Gradle (assemble) + if: runner.os == 'macos' && steps.setup_docker.outcome != 'success' + run: | + # Report success even if previous step failed (Docker on MacOS runner is very unstable) + exit 0; + - name: Run Gradle (assemble) + if: runner.os != 'macos' + run: | + ./gradlew assemble --parallel --no-build-cache -PDISABLE_BUILD_CACHE + - name: Run Gradle (assemble) + if: runner.os == 'macos' && steps.setup_docker.outcome == 'success' run: | ./gradlew assemble --parallel --no-build-cache -PDISABLE_BUILD_CACHE diff --git a/.github/workflows/dco.yml b/.github/workflows/dco.yml new file mode 100644 index 0000000000000..9580d510fd108 --- /dev/null +++ b/.github/workflows/dco.yml @@ -0,0 +1,19 @@ +name: Developer Certificate of Origin Check + +on: [pull_request] + +jobs: + dco-check: + runs-on: ubuntu-latest + + steps: + - name: Get PR Commits + id: 'get-pr-commits' + uses: tim-actions/get-pr-commits@v1.1.0 + with: + token: ${{ secrets.GITHUB_TOKEN }} + - name: DCO Check + uses: tim-actions/dco@v1.1.0 + with: + commits: ${{ steps.get-pr-commits.outputs.commits }} + diff --git a/.github/workflows/gradle-check.yml b/.github/workflows/gradle-check.yml index 0ba4dbe374fdd..07185ef4c65e3 100644 --- a/.github/workflows/gradle-check.yml +++ b/.github/workflows/gradle-check.yml @@ -17,7 
+17,7 @@ jobs: outputs: RUN_GRADLE_CHECK: ${{ steps.changed-files-specific.outputs.any_changed }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Get changed files id: changed-files-specific uses: tj-actions/changed-files@v44 diff --git a/.github/workflows/pull-request-checks.yml b/.github/workflows/pull-request-checks.yml deleted file mode 100644 index eec363572478c..0000000000000 --- a/.github/workflows/pull-request-checks.yml +++ /dev/null @@ -1,29 +0,0 @@ -name: Pull Request Checks - -on: - pull_request: - types: - [ - opened, - edited, - review_requested, - synchronize, - reopened, - ready_for_review, - ] - -jobs: - verify-description-checklist: - name: Verify Description Checklist - runs-on: ubuntu-latest - steps: - - uses: peternied/check-pull-request-description-checklist@v1.1 - if: github.event.pull_request.user.login != 'dependabot[bot]' - with: - checklist-items: | - New functionality includes testing. - All tests pass - New functionality has been documented. - New functionality has javadoc added - Commits are signed per the DCO using --signoff - Commit changes are listed out in CHANGELOG.md file (See: [Changelog](../blob/main/CONTRIBUTING.md#changelog)) diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md index af788c40ae304..1cc12f66d52e1 100644 --- a/CHANGELOG-3.0.md +++ b/CHANGELOG-3.0.md @@ -17,6 +17,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Dependencies ### Changed +- Changed locale provider from COMPAT to CLDR ([13988](https://github.com/opensearch-project/OpenSearch/pull/13988)) - Migrate client transports to Apache HttpClient / Core 5.x ([#4459](https://github.com/opensearch-project/OpenSearch/pull/4459)) - Change http code on create index API with bad input raising NotXContentException from 500 to 400 ([#4773](https://github.com/opensearch-project/OpenSearch/pull/4773)) - Improve summary error message for invalid setting updates ([#4792](https://github.com/opensearch-project/OpenSearch/pull/4792)) @@ -24,7 +25,6 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add task completion count in search backpressure stats API ([#10028](https://github.com/opensearch-project/OpenSearch/pull/10028/)) - Deprecate CamelCase `PathHierarchy` tokenizer name in favor to lowercase `path_hierarchy` ([#10894](https://github.com/opensearch-project/OpenSearch/pull/10894)) - Breaking change: Do not request "search_pipelines" metrics by default in NodesInfoRequest ([#12497](https://github.com/opensearch-project/OpenSearch/pull/12497)) -- Remove deprecated constant META_FIELDS_BEFORE_7DOT8 ([#13860](https://github.com/opensearch-project/OpenSearch/pull/13860)) ### Deprecated diff --git a/CHANGELOG.md b/CHANGELOG.md index 5416005e4706c..759cf53adf8b8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ## [Unreleased 2.x] ### Added +- Add leader and follower check failure counter metrics ([#12439](https://github.com/opensearch-project/OpenSearch/pull/12439)) - Add latency metrics for instrumenting critical clusterManager code paths ([#12333](https://github.com/opensearch-project/OpenSearch/pull/12333)) - Add support for Azure Managed Identity in repository-azure ([#12423](https://github.com/opensearch-project/OpenSearch/issues/12423)) - Add useCompoundFile index setting ([#13478](https://github.com/opensearch-project/OpenSearch/pull/13478)) @@ -13,6 +14,8 @@ The format is based on [Keep a 
Changelog](https://keepachangelog.com/en/1.0.0/), - [Remote Store] Upload translog checkpoint as object metadata to translog.tlog([#13637](https://github.com/opensearch-project/OpenSearch/pull/13637)) - Add getMetadataFields to MapperService ([#13819](https://github.com/opensearch-project/OpenSearch/pull/13819)) - [Remote State] Add async remote state deletion task running on an interval, configurable by a setting ([#13131](https://github.com/opensearch-project/OpenSearch/pull/13131)) +- Allow setting query parameters on requests ([#13776](https://github.com/opensearch-project/OpenSearch/issues/13776)) +- Add remote routing table for remote state publication with experimental feature flag ([#13304](https://github.com/opensearch-project/OpenSearch/pull/13304)) ### Dependencies - Bump `com.github.spullara.mustache.java:compiler` from 0.9.10 to 0.9.13 ([#13329](https://github.com/opensearch-project/OpenSearch/pull/13329), [#13559](https://github.com/opensearch-project/OpenSearch/pull/13559)) @@ -30,6 +33,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `reactor-netty` from 1.1.17 to 1.1.19 ([#13825](https://github.com/opensearch-project/OpenSearch/pull/13825)) - Bump `commons-cli:commons-cli` from 1.7.0 to 1.8.0 ([#13840](https://github.com/opensearch-project/OpenSearch/pull/13840)) - Bump `org.apache.xmlbeans:xmlbeans` from 5.2.0 to 5.2.1 ([#13839](https://github.com/opensearch-project/OpenSearch/pull/13839)) +- Bump `actions/checkout` from 3 to 4 ([#13935](https://github.com/opensearch-project/OpenSearch/pull/13935)) +- Bump `com.netflix.nebula.ospackage-base` from 11.9.0 to 11.9.1 ([#13933](https://github.com/opensearch-project/OpenSearch/pull/13933)) ### Changed - Add ability for Boolean and date field queries to run when only doc_values are enabled ([#11650](https://github.com/opensearch-project/OpenSearch/pull/11650)) @@ -45,7 +50,10 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix get field mapping API returns 404 error in mixed cluster with multiple versions ([#13624](https://github.com/opensearch-project/OpenSearch/pull/13624)) - Allow clearing `remote_store.compatibility_mode` setting ([#13646](https://github.com/opensearch-project/OpenSearch/pull/13646)) - Fix ReplicaShardBatchAllocator to batch shards without duplicates ([#13710](https://github.com/opensearch-project/OpenSearch/pull/13710)) -- Pass parent filter to inner hit query ([#13770](https://github.com/opensearch-project/OpenSearch/pull/13770)) +- Don't return negative scores from `multi_match` query with `cross_fields` type ([#13829](https://github.com/opensearch-project/OpenSearch/pull/13829)) +- Painless: ensure type "UnmodifiableMap" for params ([#13885](https://github.com/opensearch-project/OpenSearch/pull/13885)) +- Pass parent filter to inner hit query ([#13903](https://github.com/opensearch-project/OpenSearch/pull/13903)) +- Fix NPE on restore searchable snapshot ([#13911](https://github.com/opensearch-project/OpenSearch/pull/13911)) ### Security diff --git a/DEVELOPER_GUIDE.md b/DEVELOPER_GUIDE.md index 92ef71b92da7e..bc11e7335af49 100644 --- a/DEVELOPER_GUIDE.md +++ b/DEVELOPER_GUIDE.md @@ -62,6 +62,7 @@ - [LineLint](#linelint) - [Lucene Snapshots](#lucene-snapshots) - [Flaky Tests](#flaky-tests) + - [Gradle Check Metrics Dashboard](#gradle-check-metrics-dashboard) # Developer Guide @@ -660,3 +661,7 @@ If you encounter a build/test failure in CI that is unrelated to the change in y 4. 
If an existing issue is found, paste a link to the known issue in a comment to your PR. 5. If no existing issue is found, open one. 6. Retry CI via the GitHub UX or by pushing an update to your PR. + +### Gradle Check Metrics Dashboard + +For comprehensive insights into and analysis of Gradle Check test failures, visit the [OpenSearch Gradle Check Metrics Dashboard](https://metrics.opensearch.org/_dashboards/app/dashboards#/view/e5e64d40-ed31-11ee-be99-69d1dbc75083). This dashboard is part of the [OpenSearch Metrics Project](https://github.com/opensearch-project/opensearch-metrics) initiative. The dashboard contains multiple data points that can help investigate and resolve flaky failures, and it supports drilling down into and slicing the data with multiple filters, which further aids troubleshooting. diff --git a/build.gradle b/build.gradle index e92f396e006f5..55b31ca816214 100644 --- a/build.gradle +++ b/build.gradle @@ -55,7 +55,6 @@ plugins { id 'opensearch.docker-support' id 'opensearch.global-build-info' id "com.diffplug.spotless" version "6.25.0" apply false - id "org.gradle.test-retry" version "1.5.9" apply false id "test-report-aggregation" id 'jacoco-report-aggregation' } @@ -71,6 +70,13 @@ apply from: 'gradle/run.gradle' apply from: 'gradle/missing-javadoc.gradle' apply from: 'gradle/code-coverage.gradle' +// Disable unconditional publishing of build scans +develocity { + buildScan { + publishing.onlyIf { false } + } +} + // common maven publishing configuration allprojects { group = 'org.opensearch' @@ -462,9 +468,8 @@ gradle.projectsEvaluated { // test retry configuration subprojects { - apply plugin: "org.gradle.test-retry" tasks.withType(Test).configureEach { - retry { + develocity.testRetry { if (BuildParams.isCi()) { maxRetries = 3 maxFailures = 10 diff --git a/buildSrc/src/main/java/org/opensearch/gradle/OpenSearchTestBasePlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/OpenSearchTestBasePlugin.java index 2ea8c2d015ecc..d0cb2da9c1dd3 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/OpenSearchTestBasePlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/OpenSearchTestBasePlugin.java @@ -110,7 +110,7 @@ public void execute(Task t) { if (BuildParams.getRuntimeJavaVersion() == JavaVersion.VERSION_1_8) { test.systemProperty("java.locale.providers", "SPI,JRE"); } else { - test.systemProperty("java.locale.providers", "SPI,COMPAT"); + test.systemProperty("java.locale.providers", "SPI,CLDR"); if (test.getJavaVersion().compareTo(JavaVersion.VERSION_17) < 0) { test.jvmArgs("--illegal-access=warn"); }
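The switch from COMPAT to CLDR above changes which locale data the JVM uses, so locale-sensitive formatting can differ between the two providers. The following standalone snippet is illustrative only (it is not part of this patch, and the class name is hypothetical); on JDKs that still ship the COMPAT provider it can be run under each setting to observe the difference:

```java
import java.time.DayOfWeek;
import java.time.LocalDate;
import java.time.format.DateTimeFormatter;
import java.time.temporal.WeekFields;
import java.util.Locale;

// Run once with -Djava.locale.providers=SPI,COMPAT and once with SPI,CLDR;
// month/day names, casing, and week rules may differ between the providers.
public class LocaleProviderDemo {
    public static void main(String[] args) {
        Locale spanish = new Locale("es", "ES");
        DateTimeFormatter fmt = DateTimeFormatter.ofPattern("EEEE d MMMM yyyy", spanish);
        System.out.println(fmt.format(LocalDate.of(2024, 5, 1)));
        DayOfWeek firstDay = WeekFields.of(spanish).getFirstDayOfWeek();
        System.out.println("First day of week: " + firstDay);
    }
}
```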
diff --git a/client/rest/src/main/java/org/opensearch/client/Request.java b/client/rest/src/main/java/org/opensearch/client/Request.java index 441b01b0891ad..32fedee0c97bf 100644 --- a/client/rest/src/main/java/org/opensearch/client/Request.java +++ b/client/rest/src/main/java/org/opensearch/client/Request.java @@ -110,7 +110,13 @@ public void addParameters(Map<String, String> paramSource) { * will change it. */ public Map<String, String> getParameters() { - return unmodifiableMap(parameters); + if (options.getParameters().isEmpty()) { + return unmodifiableMap(parameters); + } else { + Map<String, String> combinedParameters = new HashMap<>(parameters); + combinedParameters.putAll(options.getParameters()); + return unmodifiableMap(combinedParameters); + } } /** diff --git a/client/rest/src/main/java/org/opensearch/client/RequestOptions.java b/client/rest/src/main/java/org/opensearch/client/RequestOptions.java index 189d785faaf45..bbc1f8bc85fcb 100644 --- a/client/rest/src/main/java/org/opensearch/client/RequestOptions.java +++ b/client/rest/src/main/java/org/opensearch/client/RequestOptions.java @@ -40,8 +40,11 @@ import java.util.ArrayList; import java.util.Collections; +import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.Objects; +import java.util.stream.Collectors; /** * The portion of an HTTP request to OpenSearch that can be @@ -53,18 +56,21 @@ public final class RequestOptions { */ public static final RequestOptions DEFAULT = new Builder( Collections.emptyList(), + Collections.emptyMap(), HeapBufferedResponseConsumerFactory.DEFAULT, null, null ).build(); private final List<Header>
headers; + private final Map<String, String> parameters; private final HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory; private final WarningsHandler warningsHandler; private final RequestConfig requestConfig; private RequestOptions(Builder builder) { this.headers = Collections.unmodifiableList(new ArrayList<>(builder.headers)); + this.parameters = Collections.unmodifiableMap(new HashMap<>(builder.parameters)); this.httpAsyncResponseConsumerFactory = builder.httpAsyncResponseConsumerFactory; this.warningsHandler = builder.warningsHandler; this.requestConfig = builder.requestConfig; @@ -74,7 +80,7 @@ private RequestOptions(Builder builder) { * Create a builder that contains these options but can be modified. */ public Builder toBuilder() { - return new Builder(headers, httpAsyncResponseConsumerFactory, warningsHandler, requestConfig); + return new Builder(headers, parameters, httpAsyncResponseConsumerFactory, warningsHandler, requestConfig); } /** @@ -84,6 +90,14 @@ public List<Header>
getHeaders() { return headers; } + /** + * Query parameters to attach to the request. Any parameters present here + * will override matching parameters in the {@link Request}, if they exist. + */ + public Map<String, String> getParameters() { + return parameters; + } + /** * The {@link HttpAsyncResponseConsumerFactory} used to create one * {@link AsyncResponseConsumer} callback per retry. Controls how the @@ -142,6 +156,12 @@ public String toString() { b.append(headers.get(h).toString()); } } + if (parameters.size() > 0) { + if (comma) b.append(", "); + comma = true; + b.append("parameters="); + b.append(parameters.entrySet().stream().map(e -> e.getKey() + "=" + e.getValue()).collect(Collectors.joining(","))); + } if (httpAsyncResponseConsumerFactory != HttpAsyncResponseConsumerFactory.DEFAULT) { if (comma) b.append(", "); comma = true; @@ -170,6 +190,7 @@ public boolean equals(Object obj) { RequestOptions other = (RequestOptions) obj; return headers.equals(other.headers) + && parameters.equals(other.parameters) && httpAsyncResponseConsumerFactory.equals(other.httpAsyncResponseConsumerFactory) && Objects.equals(warningsHandler, other.warningsHandler); } @@ -179,7 +200,7 @@ public boolean equals(Object obj) { */ @Override public int hashCode() { - return Objects.hash(headers, httpAsyncResponseConsumerFactory, warningsHandler); + return Objects.hash(headers, parameters, httpAsyncResponseConsumerFactory, warningsHandler); } /** @@ -189,17 +210,20 @@ public int hashCode() { */ public static class Builder { private final List<Header>
headers; + private final Map<String, String> parameters; private HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory; private WarningsHandler warningsHandler; private RequestConfig requestConfig; private Builder( List<Header>
headers, + Map<String, String> parameters, HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory, WarningsHandler warningsHandler, RequestConfig requestConfig ) { this.headers = new ArrayList<>(headers); + this.parameters = new HashMap<>(parameters); this.httpAsyncResponseConsumerFactory = httpAsyncResponseConsumerFactory; this.warningsHandler = warningsHandler; this.requestConfig = requestConfig; @@ -226,6 +250,21 @@ public Builder addHeader(String name, String value) { return this; } + /** + * Add the provided query parameter to the request. Any parameters added here + * will override matching parameters in the {@link Request}, if they exist. + * + * @param name the query parameter name + * @param value the query parameter value + * @throws NullPointerException if {@code name} or {@code value} is null. + */ + public Builder addParameter(String name, String value) { + Objects.requireNonNull(name, "query parameter name cannot be null"); + Objects.requireNonNull(value, "query parameter value cannot be null"); + this.parameters.put(name, value); + return this; + } + /** * Set the {@link HttpAsyncResponseConsumerFactory} used to create one * {@link AsyncResponseConsumer} callback per retry. Controls how the
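A short usage sketch of the new API (illustrative values, not part of this patch): parameters set through RequestOptions take precedence over parameters with the same name set directly on the Request, and Request#getParameters() returns the merged, unmodifiable view.

```java
import org.opensearch.client.Request;
import org.opensearch.client.RequestOptions;

public class QueryParameterOverrideExample {
    public static void main(String[] args) {
        Request request = new Request("GET", "/_search");
        request.addParameter("timeout", "1m");

        RequestOptions.Builder options = RequestOptions.DEFAULT.toBuilder();
        options.addParameter("timeout", "30s"); // overrides the Request's value
        options.addParameter("human", "true");  // merged alongside existing parameters
        request.setOptions(options);

        // Merged view: {timeout=30s, human=true}
        System.out.println(request.getParameters());
    }
}
```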
randomAsciiAlphanumOfLength(3)); + } + } + if (randomBoolean()) { builder.setHttpAsyncResponseConsumerFactory(new HeapBufferedResponseConsumerFactory(1)); } diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle index fbd13f03af814..211b3bd55da60 100644 --- a/distribution/packages/build.gradle +++ b/distribution/packages/build.gradle @@ -63,7 +63,7 @@ import java.util.regex.Pattern */ plugins { - id "com.netflix.nebula.ospackage-base" version "11.9.0" + id "com.netflix.nebula.ospackage-base" version "11.9.1" } void addProcessFilesTask(String type, boolean jdk) { diff --git a/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/SystemJvmOptions.java b/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/SystemJvmOptions.java index 726c381db09f6..af7138569972a 100644 --- a/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/SystemJvmOptions.java +++ b/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/SystemJvmOptions.java @@ -105,13 +105,8 @@ private static String javaLocaleProviders() { SPI setting is used to allow loading custom CalendarDataProvider in jdk8 it has to be loaded from jre/lib/ext, in jdk9+ it is already within ES project and on a classpath - - Due to internationalization enhancements in JDK 9 OpenSearch need to set the provider to COMPAT otherwise time/date - parsing will break in an incompatible way for some date patterns and locales. - //TODO COMPAT will be deprecated in at some point, see please https://bugs.openjdk.java.net/browse/JDK-8232906 - See also: documentation in server/org.opensearch.common.time.IsoCalendarDataProvider */ - return "-Djava.locale.providers=SPI,COMPAT"; + return "-Djava.locale.providers=SPI,CLDR"; } } diff --git a/gradle.properties b/gradle.properties index 7c359ed2b652c..4e8c5b98116c1 100644 --- a/gradle.properties +++ b/gradle.properties @@ -22,7 +22,7 @@ org.gradle.jvmargs=-Xmx3g -XX:+HeapDumpOnOutOfMemoryError -Xss2m \ options.forkOptions.memoryMaximumSize=3g # Disable Gradle Enterprise Gradle plugin's test retry -systemProp.gradle.enterprise.testretry.enabled=false +systemProp.develocity.testretry.enabled.enabled=false # Disable duplicate project id detection # See https://docs.gradle.org/current/userguide/upgrading_version_6.html#duplicate_project_names_may_cause_publication_to_fail diff --git a/gradle/ide.gradle b/gradle/ide.gradle index 14d6b2982ccd0..4c4f3b07836c5 100644 --- a/gradle/ide.gradle +++ b/gradle/ide.gradle @@ -81,7 +81,7 @@ if (System.getProperty('idea.active') == 'true') { } runConfigurations { defaults(JUnit) { - vmParameters = '-ea -Djava.locale.providers=SPI,COMPAT' + vmParameters = '-ea -Djava.locale.providers=SPI,CLDR' if (BuildParams.runtimeJavaVersion > JavaVersion.VERSION_17) { vmParameters += ' -Djava.security.manager=allow' } diff --git a/server/src/main/java/org/opensearch/index/translog/BufferedChecksumStreamInput.java b/libs/core/src/main/java/org/opensearch/core/common/io/stream/BufferedChecksumStreamInput.java similarity index 96% rename from server/src/main/java/org/opensearch/index/translog/BufferedChecksumStreamInput.java rename to libs/core/src/main/java/org/opensearch/core/common/io/stream/BufferedChecksumStreamInput.java index f75f27b7bcb91..41680961b36e9 100644 --- a/server/src/main/java/org/opensearch/index/translog/BufferedChecksumStreamInput.java +++ b/libs/core/src/main/java/org/opensearch/core/common/io/stream/BufferedChecksumStreamInput.java @@ -30,12 +30,10 @@ * GitHub history for 
details. */ -package org.opensearch.index.translog; +package org.opensearch.core.common.io.stream; import org.apache.lucene.store.BufferedChecksum; import org.apache.lucene.util.BitUtil; -import org.opensearch.core.common.io.stream.FilterStreamInput; -import org.opensearch.core.common.io.stream.StreamInput; import java.io.EOFException; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/index/translog/BufferedChecksumStreamOutput.java b/libs/core/src/main/java/org/opensearch/core/common/io/stream/BufferedChecksumStreamOutput.java similarity index 96% rename from server/src/main/java/org/opensearch/index/translog/BufferedChecksumStreamOutput.java rename to libs/core/src/main/java/org/opensearch/core/common/io/stream/BufferedChecksumStreamOutput.java index 9e96664c79cc5..422f956c0cd47 100644 --- a/server/src/main/java/org/opensearch/index/translog/BufferedChecksumStreamOutput.java +++ b/libs/core/src/main/java/org/opensearch/core/common/io/stream/BufferedChecksumStreamOutput.java @@ -30,11 +30,10 @@ * GitHub history for details. */ -package org.opensearch.index.translog; +package org.opensearch.core.common.io.stream; import org.apache.lucene.store.BufferedChecksum; import org.opensearch.common.annotation.PublicApi; -import org.opensearch.core.common.io.stream.StreamOutput; import java.io.IOException; import java.util.zip.CRC32; diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/XContentBuilder.java b/libs/core/src/main/java/org/opensearch/core/xcontent/XContentBuilder.java index 976f353100c55..552945d085884 100644 --- a/libs/core/src/main/java/org/opensearch/core/xcontent/XContentBuilder.java +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/XContentBuilder.java @@ -157,6 +157,9 @@ public static XContentBuilder builder(XContent xContent, Set<String> includes, Set<String> excludes) /** * Returns a string representation of the builder (only applicable for text based xcontent). + * Note: calling toString() explicitly, or implicitly from a debugger, can cause the XContentBuilder + * to close; this is a side effect of {@link BytesReference#bytes()}. + * Trying to write more content after toString() will cause an NPE. Use it with caution. */ @Override public String toString() {
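The caveat documented above can be made concrete with a small sketch (hypothetical class, assuming a text-based JSON builder): toString() serializes the builder by closing its underlying stream, so it must be the last interaction with the builder.

```java
import org.opensearch.common.xcontent.XContentFactory;
import org.opensearch.core.xcontent.XContentBuilder;

public class ToStringCaveatDemo {
    public static void main(String[] args) throws Exception {
        XContentBuilder builder = XContentFactory.jsonBuilder();
        builder.startObject().field("a", 1).endObject();
        String json = builder.toString(); // side effect: closes the builder
        System.out.println(json);         // prints {"a":1}
        // builder.field("b", 2);         // would now fail with a NullPointerException
    }
}
```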
diff --git a/modules/cache-common/src/internalClusterTest/java/org.opensearch.cache.common.tier/TieredSpilloverCacheIT.java b/modules/cache-common/src/internalClusterTest/java/org/opensearch/cache/common/tier/TieredSpilloverCacheIT.java similarity index 99% rename from modules/cache-common/src/internalClusterTest/java/org.opensearch.cache.common.tier/TieredSpilloverCacheIT.java rename to modules/cache-common/src/internalClusterTest/java/org/opensearch/cache/common/tier/TieredSpilloverCacheIT.java index bfc184cff0566..02be0990eb136 100644 --- a/modules/cache-common/src/internalClusterTest/java/org.opensearch.cache.common.tier/TieredSpilloverCacheIT.java +++ b/modules/cache-common/src/internalClusterTest/java/org/opensearch/cache/common/tier/TieredSpilloverCacheIT.java @@ -65,7 +65,7 @@ protected Collection<Class<? extends Plugin>> nodePlugins() { return Arrays.asList(TieredSpilloverCachePlugin.class, MockDiskCachePlugin.class); } - private Settings defaultSettings(String onHeapCacheSizeInBytesOrPecentage) { + static Settings defaultSettings(String onHeapCacheSizeInBytesOrPercentage) { return Settings.builder() .put(FeatureFlags.PLUGGABLE_CACHE, "true") .put( @@ -88,7 +88,7 @@ private Settings defaultSettings(String onHeapCacheSizeInBytesOrPecentage) { OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) .get(MAXIMUM_SIZE_IN_BYTES_KEY) .getKey(), - onHeapCacheSizeInBytesOrPecentage + onHeapCacheSizeInBytesOrPercentage ) .build(); } diff --git a/modules/cache-common/src/internalClusterTest/java/org/opensearch/cache/common/tier/TieredSpilloverCacheStatsIT.java b/modules/cache-common/src/internalClusterTest/java/org/opensearch/cache/common/tier/TieredSpilloverCacheStatsIT.java new file mode 100644 index 0000000000000..537caccbac652 --- /dev/null +++ b/modules/cache-common/src/internalClusterTest/java/org/opensearch/cache/common/tier/TieredSpilloverCacheStatsIT.java @@ -0,0 +1,501 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.cache.common.tier; + +import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest; +import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; +import org.opensearch.action.admin.indices.stats.CommonStatsFlags; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.client.Client; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.cache.CacheType; +import org.opensearch.common.cache.service.NodeCacheStats; +import org.opensearch.common.cache.stats.ImmutableCacheStats; +import org.opensearch.common.cache.stats.ImmutableCacheStatsHolder; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.index.cache.request.RequestCacheStats; +import org.opensearch.index.query.QueryBuilders; +import org.opensearch.indices.IndicesRequestCache; +import org.opensearch.plugins.Plugin; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.hamcrest.OpenSearchAssertions; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static org.opensearch.cache.common.tier.TieredSpilloverCacheStatsHolder.TIER_DIMENSION_NAME; +import static org.opensearch.cache.common.tier.TieredSpilloverCacheStatsHolder.TIER_DIMENSION_VALUE_DISK; +import static org.opensearch.cache.common.tier.TieredSpilloverCacheStatsHolder.TIER_DIMENSION_VALUE_ON_HEAP; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; + +// Use a single data node to simplify accessing cache stats across different shards. +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class TieredSpilloverCacheStatsIT extends OpenSearchIntegTestCase { + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return Arrays.asList(TieredSpilloverCachePlugin.class, TieredSpilloverCacheIT.MockDiskCachePlugin.class); + } + + private final String HEAP_CACHE_SIZE_STRING = "10000B"; + private final int HEAP_CACHE_SIZE = 10_000; + private final String index1Name = "index1"; + private final String index2Name = "index2"; + + /** + * Test aggregating by indices + */ + public void testIndicesLevelAggregation() throws Exception { + internalCluster().startNodes( + 1, + Settings.builder() + .put(TieredSpilloverCacheIT.defaultSettings(HEAP_CACHE_SIZE_STRING)) + .put( + TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), + new TimeValue(0, TimeUnit.SECONDS) + ) + .build() + ); + Client client = client(); + Map<String, Integer> values = setupCacheForAggregationTests(client); + + ImmutableCacheStatsHolder allLevelsStatsHolder = getNodeCacheStatsResult( + client, + List.of(IndicesRequestCache.INDEX_DIMENSION_NAME, TIER_DIMENSION_NAME) + ); + ImmutableCacheStatsHolder indicesOnlyStatsHolder = getNodeCacheStatsResult( + client, + List.of(IndicesRequestCache.INDEX_DIMENSION_NAME) + ); + + // Get values for indices alone, assert these match for statsHolders that have additional dimensions vs.
a statsHolder that only has + // the indices dimension + ImmutableCacheStats index1ExpectedStats = returnNullIfAllZero( + new ImmutableCacheStats( + values.get("hitsOnHeapIndex1") + values.get("hitsOnDiskIndex1"), + values.get("itemsOnDiskIndex1AfterTest") + values.get("itemsOnHeapIndex1AfterTest"), + 0, + (values.get("itemsOnDiskIndex1AfterTest") + values.get("itemsOnHeapIndex1AfterTest")) * values.get("singleSearchSize"), + values.get("itemsOnDiskIndex1AfterTest") + values.get("itemsOnHeapIndex1AfterTest") + ) + ); + ImmutableCacheStats index2ExpectedStats = returnNullIfAllZero( + new ImmutableCacheStats( + values.get("hitsOnHeapIndex2") + values.get("hitsOnDiskIndex2"), + values.get("itemsOnDiskIndex2AfterTest") + values.get("itemsOnHeapIndex2AfterTest"), + 0, + (values.get("itemsOnDiskIndex2AfterTest") + values.get("itemsOnHeapIndex2AfterTest")) * values.get("singleSearchSize"), + values.get("itemsOnDiskIndex2AfterTest") + values.get("itemsOnHeapIndex2AfterTest") + ) + ); + + for (ImmutableCacheStatsHolder statsHolder : List.of(allLevelsStatsHolder, indicesOnlyStatsHolder)) { + assertEquals(index1ExpectedStats, statsHolder.getStatsForDimensionValues(List.of(index1Name))); + assertEquals(index2ExpectedStats, statsHolder.getStatsForDimensionValues(List.of(index2Name))); + } + } + + /** + * Test aggregating by indices and tier + */ + public void testIndicesAndTierLevelAggregation() throws Exception { + internalCluster().startNodes( + 1, + Settings.builder() + .put(TieredSpilloverCacheIT.defaultSettings(HEAP_CACHE_SIZE_STRING)) + .put( + TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), + new TimeValue(0, TimeUnit.SECONDS) + ) + .build() + ); + Client client = client(); + Map<String, Integer> values = setupCacheForAggregationTests(client); + + ImmutableCacheStatsHolder allLevelsStatsHolder = getNodeCacheStatsResult( + client, + List.of(IndicesRequestCache.INDEX_DIMENSION_NAME, TIER_DIMENSION_NAME) + ); + + // Get values broken down by indices+tiers + ImmutableCacheStats index1HeapExpectedStats = returnNullIfAllZero( + new ImmutableCacheStats( + values.get("hitsOnHeapIndex1"), + values.get("itemsOnHeapIndex1AfterTest") + values.get("itemsOnDiskIndex1AfterTest") + values.get("hitsOnDiskIndex1"), + values.get("itemsOnDiskIndex1AfterTest"), + values.get("itemsOnHeapIndex1AfterTest") * values.get("singleSearchSize"), + values.get("itemsOnHeapIndex1AfterTest") + ) + ); + assertEquals( + index1HeapExpectedStats, + allLevelsStatsHolder.getStatsForDimensionValues(List.of(index1Name, TIER_DIMENSION_VALUE_ON_HEAP)) + ); + + ImmutableCacheStats index2HeapExpectedStats = returnNullIfAllZero( + new ImmutableCacheStats( + values.get("hitsOnHeapIndex2"), + values.get("itemsOnHeapIndex2AfterTest") + values.get("itemsOnDiskIndex2AfterTest") + values.get("hitsOnDiskIndex2"), + values.get("itemsOnDiskIndex2AfterTest"), + values.get("itemsOnHeapIndex2AfterTest") * values.get("singleSearchSize"), + values.get("itemsOnHeapIndex2AfterTest") + ) + ); + assertEquals( + index2HeapExpectedStats, + allLevelsStatsHolder.getStatsForDimensionValues(List.of(index2Name, TIER_DIMENSION_VALUE_ON_HEAP)) + ); + + ImmutableCacheStats index1DiskExpectedStats = returnNullIfAllZero( + new ImmutableCacheStats( + values.get("hitsOnDiskIndex1"), + values.get("itemsOnHeapIndex1AfterTest") + values.get("itemsOnDiskIndex1AfterTest"), + 0, + values.get("itemsOnDiskIndex1AfterTest") * values.get("singleSearchSize"), + values.get("itemsOnDiskIndex1AfterTest") + ) + ); + assertEquals( +
index1DiskExpectedStats, + allLevelsStatsHolder.getStatsForDimensionValues(List.of(index1Name, TIER_DIMENSION_VALUE_DISK)) + ); + + ImmutableCacheStats index2DiskExpectedStats = returnNullIfAllZero( + new ImmutableCacheStats( + values.get("hitsOnDiskIndex2"), + values.get("itemsOnHeapIndex2AfterTest") + values.get("itemsOnDiskIndex2AfterTest"), + 0, + values.get("itemsOnDiskIndex2AfterTest") * values.get("singleSearchSize"), + values.get("itemsOnDiskIndex2AfterTest") + ) + ); + assertEquals( + index2DiskExpectedStats, + allLevelsStatsHolder.getStatsForDimensionValues(List.of(index2Name, TIER_DIMENSION_VALUE_DISK)) + ); + } + + /** + * Test aggregating by tier only + */ + public void testTierLevelAggregation() throws Exception { + internalCluster().startNodes( + 1, + Settings.builder() + .put(TieredSpilloverCacheIT.defaultSettings(HEAP_CACHE_SIZE_STRING)) + .put( + TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), + new TimeValue(0, TimeUnit.SECONDS) + ) + .build() + ); + Client client = client(); + Map<String, Integer> values = setupCacheForAggregationTests(client); + + // Get values for tiers alone and check they add correctly across indices + ImmutableCacheStatsHolder tiersOnlyStatsHolder = getNodeCacheStatsResult(client, List.of(TIER_DIMENSION_NAME)); + ImmutableCacheStats totalHeapExpectedStats = returnNullIfAllZero( + new ImmutableCacheStats( + values.get("hitsOnHeapIndex1") + values.get("hitsOnHeapIndex2"), + values.get("itemsOnHeapAfterTest") + values.get("itemsOnDiskAfterTest") + values.get("hitsOnDiskIndex1") + values.get( + "hitsOnDiskIndex2" + ), + values.get("itemsOnDiskAfterTest"), + values.get("itemsOnHeapAfterTest") * values.get("singleSearchSize"), + values.get("itemsOnHeapAfterTest") + ) + ); + ImmutableCacheStats heapStats = tiersOnlyStatsHolder.getStatsForDimensionValues(List.of(TIER_DIMENSION_VALUE_ON_HEAP)); + assertEquals(totalHeapExpectedStats, heapStats); + ImmutableCacheStats totalDiskExpectedStats = returnNullIfAllZero( + new ImmutableCacheStats( + values.get("hitsOnDiskIndex1") + values.get("hitsOnDiskIndex2"), + values.get("itemsOnHeapAfterTest") + values.get("itemsOnDiskAfterTest"), + 0, + values.get("itemsOnDiskAfterTest") * values.get("singleSearchSize"), + values.get("itemsOnDiskAfterTest") + ) + ); + ImmutableCacheStats diskStats = tiersOnlyStatsHolder.getStatsForDimensionValues(List.of(TIER_DIMENSION_VALUE_DISK)); + assertEquals(totalDiskExpectedStats, diskStats); + } + + public void testInvalidLevelsAreIgnored() throws Exception { + internalCluster().startNodes( + 1, + Settings.builder() + .put(TieredSpilloverCacheIT.defaultSettings(HEAP_CACHE_SIZE_STRING)) + .put( + TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), + new TimeValue(0, TimeUnit.SECONDS) + ) + .build() + ); + Client client = client(); + Map<String, Integer> values = setupCacheForAggregationTests(client); + + ImmutableCacheStatsHolder allLevelsStatsHolder = getNodeCacheStatsResult( + client, + List.of(IndicesRequestCache.INDEX_DIMENSION_NAME, TIER_DIMENSION_NAME) + ); + ImmutableCacheStatsHolder indicesOnlyStatsHolder = getNodeCacheStatsResult( + client, + List.of(IndicesRequestCache.INDEX_DIMENSION_NAME) + ); + + // Test invalid levels are ignored and permuting the order of levels in the request doesn't matter + + // This should be equivalent to just "indices" + ImmutableCacheStatsHolder indicesEquivalentStatsHolder = getNodeCacheStatsResult( + client, +
List.of(IndicesRequestCache.INDEX_DIMENSION_NAME, "unrecognized_dimension") + ); + assertEquals(indicesOnlyStatsHolder, indicesEquivalentStatsHolder); + + // This should be equivalent to "indices", "tier" + ImmutableCacheStatsHolder indicesAndTierEquivalentStatsHolder = getNodeCacheStatsResult( + client, + List.of(TIER_DIMENSION_NAME, "unrecognized_dimension_1", IndicesRequestCache.INDEX_DIMENSION_NAME, "unrecognized_dimension_2") + ); + assertEquals(allLevelsStatsHolder, indicesAndTierEquivalentStatsHolder); + + // This should be equivalent to no levels passed in + ImmutableCacheStatsHolder noLevelsEquivalentStatsHolder = getNodeCacheStatsResult( + client, + List.of("unrecognized_dimension_1", "unrecognized_dimension_2") + ); + ImmutableCacheStatsHolder noLevelsStatsHolder = getNodeCacheStatsResult(client, List.of()); + assertEquals(noLevelsStatsHolder, noLevelsEquivalentStatsHolder); + } + + /** + * Check the new stats API returns the same values as the old stats API. + */ + public void testStatsMatchOldApi() throws Exception { + internalCluster().startNodes( + 1, + Settings.builder() + .put(TieredSpilloverCacheIT.defaultSettings(HEAP_CACHE_SIZE_STRING)) + .put( + TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), + new TimeValue(0, TimeUnit.SECONDS) + ) + .build() + ); + String index = "index"; + Client client = client(); + startIndex(client, index); + + // First search one time to see how big a single value will be + searchIndex(client, index, 0); + // get total stats + long singleSearchSize = getTotalStats(client).getSizeInBytes(); + // Select numbers so we get some values on both heap and disk + int itemsOnHeap = HEAP_CACHE_SIZE / (int) singleSearchSize; + int itemsOnDisk = 1 + randomInt(30); // The first one we search (to get the size) always goes to disk + int expectedEntries = itemsOnHeap + itemsOnDisk; + + for (int i = 1; i < expectedEntries; i++) { + // Cause misses + searchIndex(client, index, i); + } + int expectedMisses = itemsOnHeap + itemsOnDisk; + + // Cause some hits + int expectedHits = randomIntBetween(itemsOnHeap, expectedEntries); // Select it so some hits come from both tiers + for (int i = 0; i < expectedHits; i++) { + searchIndex(client, index, i); + } + + ImmutableCacheStats totalStats = getNodeCacheStatsResult(client, List.of()).getTotalStats(); + + // Check the new stats API values are as expected + assertEquals( + new ImmutableCacheStats(expectedHits, expectedMisses, 0, expectedEntries * singleSearchSize, expectedEntries), + totalStats + ); + // Now check the new stats API values for the cache as a whole match the old stats API values + RequestCacheStats oldAPIStats = client.admin() + .indices() + .prepareStats(index) + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache(); + assertEquals(oldAPIStats.getHitCount(), totalStats.getHits()); + assertEquals(oldAPIStats.getMissCount(), totalStats.getMisses()); + assertEquals(oldAPIStats.getEvictions(), totalStats.getEvictions()); + assertEquals(oldAPIStats.getMemorySizeInBytes(), totalStats.getSizeInBytes()); + } + + private void startIndex(Client client, String indexName) throws InterruptedException { + assertAcked( + client.admin() + .indices() + .prepareCreate(indexName) + .setMapping("k", "type=keyword") + .setSettings( + Settings.builder() + .put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .build() + ) + 
.get() + ); + indexRandom(true, client.prepareIndex(indexName).setSource("k", "hello")); + ensureSearchable(indexName); + } + + private Map setupCacheForAggregationTests(Client client) throws Exception { + startIndex(client, index1Name); + startIndex(client, index2Name); + + // First search one time to see how big a single value will be + searchIndex(client, index1Name, 0); + // get total stats + long singleSearchSize = getTotalStats(client).getSizeInBytes(); + int itemsOnHeapAfterTest = HEAP_CACHE_SIZE / (int) singleSearchSize; // As the heap tier evicts, the items on it after the test will + // be the same as its max capacity + int itemsOnDiskAfterTest = 1 + randomInt(30); // The first one we search (to get the size) always goes to disk + + // Put some values on heap and disk for each index + int itemsOnHeapIndex1AfterTest = randomInt(itemsOnHeapAfterTest); + int itemsOnHeapIndex2AfterTest = itemsOnHeapAfterTest - itemsOnHeapIndex1AfterTest; + int itemsOnDiskIndex1AfterTest = 1 + randomInt(itemsOnDiskAfterTest - 1); + // The first one we search (to get the size) always goes to disk + int itemsOnDiskIndex2AfterTest = itemsOnDiskAfterTest - itemsOnDiskIndex1AfterTest; + int hitsOnHeapIndex1 = randomInt(itemsOnHeapIndex1AfterTest); + int hitsOnDiskIndex1 = randomInt(itemsOnDiskIndex1AfterTest); + int hitsOnHeapIndex2 = randomInt(itemsOnHeapIndex2AfterTest); + int hitsOnDiskIndex2 = randomInt(itemsOnDiskIndex2AfterTest); + + // Put these values into a map so tests can know what to expect in stats responses + Map expectedValues = new HashMap<>(); + expectedValues.put("itemsOnHeapIndex1AfterTest", itemsOnHeapIndex1AfterTest); + expectedValues.put("itemsOnHeapIndex2AfterTest", itemsOnHeapIndex2AfterTest); + expectedValues.put("itemsOnDiskIndex1AfterTest", itemsOnDiskIndex1AfterTest); + expectedValues.put("itemsOnDiskIndex2AfterTest", itemsOnDiskIndex2AfterTest); + expectedValues.put("hitsOnHeapIndex1", hitsOnHeapIndex1); + expectedValues.put("hitsOnDiskIndex1", hitsOnDiskIndex1); + expectedValues.put("hitsOnHeapIndex2", hitsOnHeapIndex2); + expectedValues.put("hitsOnDiskIndex2", hitsOnDiskIndex2); + expectedValues.put("singleSearchSize", (int) singleSearchSize); + expectedValues.put("itemsOnDiskAfterTest", itemsOnDiskAfterTest); + expectedValues.put("itemsOnHeapAfterTest", itemsOnHeapAfterTest); // Can only pass 10 keys in Map.of() constructor + + // The earliest items (0 - itemsOnDiskAfterTest) are the ones which get evicted to disk + for (int i = 1; i < itemsOnDiskIndex1AfterTest; i++) { // Start at 1 as 0 has already been searched + searchIndex(client, index1Name, i); + } + for (int i = itemsOnDiskIndex1AfterTest; i < itemsOnDiskIndex1AfterTest + itemsOnDiskIndex2AfterTest; i++) { + searchIndex(client, index2Name, i); + } + // The remaining items stay on heap + for (int i = itemsOnDiskAfterTest; i < itemsOnDiskAfterTest + itemsOnHeapIndex1AfterTest; i++) { + searchIndex(client, index1Name, i); + } + for (int i = itemsOnDiskAfterTest + itemsOnHeapIndex1AfterTest; i < itemsOnDiskAfterTest + itemsOnHeapAfterTest; i++) { + searchIndex(client, index2Name, i); + } + + // Get some hits on all combinations of indices and tiers + for (int i = itemsOnDiskAfterTest; i < itemsOnDiskAfterTest + hitsOnHeapIndex1; i++) { + // heap hits for index 1 + searchIndex(client, index1Name, i); + } + for (int i = itemsOnDiskAfterTest + itemsOnHeapIndex1AfterTest; i < itemsOnDiskAfterTest + itemsOnHeapIndex1AfterTest + + hitsOnHeapIndex2; i++) { + // heap hits for index 2 + searchIndex(client, index2Name, i); + } + 
for (int i = 0; i < hitsOnDiskIndex1; i++) { + // disk hits for index 1 + searchIndex(client, index1Name, i); + } + for (int i = itemsOnDiskIndex1AfterTest; i < itemsOnDiskIndex1AfterTest + hitsOnDiskIndex2; i++) { + // disk hits for index 2 + searchIndex(client, index2Name, i); + } + return expectedValues; + } + + private ImmutableCacheStats returnNullIfAllZero(ImmutableCacheStats expectedStats) { + // If the randomly chosen numbers are such that the expected stats would be 0, we actually have not interacted with the cache for + // this index. + // In this case, we expect the stats holder to have no stats for this node, and therefore we should get null from + // statsHolder.getStatsForDimensionValues(). + // We will not see it in the XContent response. + if (expectedStats.equals(new ImmutableCacheStats(0, 0, 0, 0, 0))) { + return null; + } + return expectedStats; + } + + // Duplicated from CacheStatsAPIIndicesRequestCacheIT.java, as we can't add a dependency on server.internalClusterTest + + private SearchResponse searchIndex(Client client, String index, int searchSuffix) { + SearchResponse resp = client.prepareSearch(index) + .setRequestCache(true) + .setQuery(QueryBuilders.termQuery("k", "hello" + padWithZeros(4, searchSuffix))) + // pad with zeros so request 0 and request 10 have the same size ("0000" and "0010" instead of "0" and "10") + .get(); + assertSearchResponse(resp); + OpenSearchAssertions.assertAllSuccessful(resp); + return resp; + } + + private String padWithZeros(int finalLength, int inputValue) { + // Avoid forbidden API String.format() + String input = String.valueOf(inputValue); + if (input.length() >= finalLength) { + return input; + } + StringBuilder sb = new StringBuilder(); + while (sb.length() < finalLength - input.length()) { + sb.append('0'); + } + sb.append(input); + return sb.toString(); + } + + private ImmutableCacheStats getTotalStats(Client client) throws IOException { + ImmutableCacheStatsHolder statsHolder = getNodeCacheStatsResult(client, List.of()); + return statsHolder.getStatsForDimensionValues(List.of()); + } + + private static ImmutableCacheStatsHolder getNodeCacheStatsResult(Client client, List<String> aggregationLevels) throws IOException { + CommonStatsFlags statsFlags = new CommonStatsFlags(); + statsFlags.includeAllCacheTypes(); + String[] flagsLevels; + if (aggregationLevels == null) { + flagsLevels = null; + } else { + flagsLevels = aggregationLevels.toArray(new String[0]); + } + statsFlags.setLevels(flagsLevels); + + NodesStatsResponse nodeStatsResponse = client.admin() + .cluster() + .prepareNodesStats("data:true") + .addMetric(NodesStatsRequest.Metric.CACHE_STATS.metricName()) + .setIndices(statsFlags) + .get(); + // Can always get the first data node as there's only one in this test suite + assertEquals(1, nodeStatsResponse.getNodes().size()); + NodeCacheStats ncs = nodeStatsResponse.getNodes().get(0).getNodeCacheStats(); + return ncs.getStatsByCache(CacheType.INDICES_REQUEST_CACHE); + } +} diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java index f40c35dde83de..63cdbca101f2a 100644 --- a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java +++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java @@ -327,6 +327,7 @@ private Function<ICacheKey<K>, Tuple<V, String>> getValueFromTieredCache(boolean void
handleRemovalFromHeapTier(RemovalNotification<ICacheKey<K>, V> notification) { ICacheKey<K> key = notification.getKey(); boolean wasEvicted = SPILLOVER_REMOVAL_REASONS.contains(notification.getRemovalReason()); + boolean countEvictionTowardsTotal = false; // Don't count this eviction towards the cache's total if it ends up in the disk tier if (caches.get(diskCache).isEnabled() && wasEvicted && evaluatePolicies(notification.getValue())) { try (ReleasableLock ignore = writeLock.acquire()) { diskCache.put(key, notification.getValue()); // spill over to the disk tier and increment its stats @@ -336,21 +337,28 @@ void handleRemovalFromHeapTier(RemovalNotification<ICacheKey<K>, V> notification // If the value is not going to the disk cache, send this notification to the TSC's removal listener // as the value is leaving the TSC entirely removalListener.onRemoval(notification); + countEvictionTowardsTotal = true; } - updateStatsOnRemoval(TIER_DIMENSION_VALUE_ON_HEAP, wasEvicted, key, notification.getValue()); + updateStatsOnRemoval(TIER_DIMENSION_VALUE_ON_HEAP, wasEvicted, key, notification.getValue(), countEvictionTowardsTotal); } void handleRemovalFromDiskTier(RemovalNotification<ICacheKey<K>, V> notification) { // Values removed from the disk tier leave the TSC entirely removalListener.onRemoval(notification); boolean wasEvicted = SPILLOVER_REMOVAL_REASONS.contains(notification.getRemovalReason()); - updateStatsOnRemoval(TIER_DIMENSION_VALUE_DISK, wasEvicted, notification.getKey(), notification.getValue()); + updateStatsOnRemoval(TIER_DIMENSION_VALUE_DISK, wasEvicted, notification.getKey(), notification.getValue(), true); } - void updateStatsOnRemoval(String removedFromTierValue, boolean wasEvicted, ICacheKey<K> key, V value) { + void updateStatsOnRemoval( + String removedFromTierValue, + boolean wasEvicted, + ICacheKey<K> key, + V value, + boolean countEvictionTowardsTotal + ) { List<String> dimensionValues = statsHolder.getDimensionsWithTierValue(key.dimensions, removedFromTierValue); if (wasEvicted) { - statsHolder.incrementEvictions(dimensionValues); + statsHolder.incrementEvictions(dimensionValues, countEvictionTowardsTotal); } statsHolder.decrementItems(dimensionValues); statsHolder.decrementSizeInBytes(dimensionValues, weigher.applyAsLong(key, value));
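The rule implemented by the two handlers above can be reduced to one predicate. The sketch below uses hypothetical names and is not code from the patch: an eviction counts toward the cache-wide total only when the value leaves the tiered cache entirely, rather than spilling over into an enabled disk tier.

```java
public class EvictionAccountingSketch {
    // Mirrors the countEvictionTowardsTotal logic in handleRemovalFromHeapTier:
    // spilled-over entries are still cached, so only the heap tier's own counter moves.
    static boolean countTowardsTotal(boolean diskTierEnabled, boolean wasEvicted, boolean passesDiskPolicies) {
        if (!wasEvicted) {
            return false; // invalidations and explicit removals are not evictions
        }
        boolean spilledToDisk = diskTierEnabled && passesDiskPolicies;
        return !spilledToDisk;
    }
}
```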
diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheStatsHolder.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheStatsHolder.java index d17059e8dee94..b40724430454b 100644 --- a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheStatsHolder.java +++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheStatsHolder.java @@ -105,20 +105,29 @@ public void incrementMisses(List<String> dimensionValues) { internalIncrement(dimensionValues, missIncrementer, true); } + /** + * This method shouldn't be used in this class. Instead, use incrementEvictions(dimensionValues, includeInTotal), + * which specifies whether the eviction should be included in the cache's total evictions or should + * just count towards that tier's evictions. + * @param dimensionValues The dimension values + */ @Override public void incrementEvictions(List<String> dimensionValues) { - final String tierValue = validateTierDimensionValue(dimensionValues); + throw new UnsupportedOperationException( + "TieredSpilloverCacheStatsHolder must specify whether to include an eviction in the total cache stats. Use incrementEvictions(List<String> dimensionValues, boolean includeInTotal)" ); + } - // If the disk tier is present, only evictions from the disk tier should be included in total values. + /** + * Increment evictions for this set of dimension values. + * @param dimensionValues The dimension values + * @param includeInTotal Whether to include this eviction in the total for the whole cache's evictions + */ + public void incrementEvictions(List<String> dimensionValues, boolean includeInTotal) { + validateTierDimensionValue(dimensionValues); + // If we count this eviction towards the total, we should increment all ancestor nodes. If not, only increment the leaf node. Consumer<DefaultCacheStatsHolder.Node> evictionsIncrementer = (node) -> { - if (tierValue.equals(TIER_DIMENSION_VALUE_ON_HEAP) && diskCacheEnabled) { - // If on-heap tier, increment only the leaf node corresponding to the on heap values; not the total values in its parent - // nodes - if (node.isAtLowestLevel()) { - node.incrementEvictions(); - } - } else { - // If disk tier, or on-heap tier with a disabled disk tier, increment the leaf node and its parents + if (includeInTotal || node.isAtLowestLevel()) { node.incrementEvictions(); } }; diff --git a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java index 6c49341591589..54b15f236a418 100644 --- a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java +++ b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java @@ -926,14 +926,14 @@ public void testDiskTierPolicies() throws Exception { MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>(); TieredSpilloverCache<String, String> tieredSpilloverCache = intializeTieredSpilloverCache( keyValueSize, - 100, + keyValueSize * 100, removalListener, Settings.builder() .put( OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) .get(MAXIMUM_SIZE_IN_BYTES_KEY) .getKey(), - onHeapCacheSize * 50 + "b" + onHeapCacheSize * keyValueSize + "b" ) .build(), 0, @@ -955,6 +955,7 @@ public void testDiskTierPolicies() throws Exception { LoadAwareCacheLoader<ICacheKey<String>, String> loader = getLoadAwareCacheLoader(keyValuePairs); + int expectedEvictions = 0; for (String key : keyValuePairs.keySet()) { ICacheKey<String> iCacheKey = getICacheKey(key); Boolean expectedOutput = expectedOutputs.get(key); @@ -967,8 +968,15 @@ } else { // Should miss as heap tier size = 0 and the policy rejected it assertNull(result); + expectedEvictions++; } } + + // We expect values that were evicted from the heap tier and not allowed into the disk tier by the policy + // to count towards total evictions + assertEquals(keyValuePairs.size(), getEvictionsForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_ON_HEAP)); + assertEquals(0, getEvictionsForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_DISK)); // Disk tier is large enough for no evictions + assertEquals(expectedEvictions, getTotalStatsSnapshot(tieredSpilloverCache).getEvictions()); } public void testTookTimePolicyFromFactory() throws Exception { @@ -1493,6 +1501,11 @@ private ImmutableCacheStats getStatsSnapshotForTier(TieredSpilloverCache<String, String> t return snapshot; } + private ImmutableCacheStats getTotalStatsSnapshot(TieredSpilloverCache<String, String> tsc) throws IOException { + ImmutableCacheStatsHolder cacheStats = tsc.stats(new String[0]); + return
cacheStats.getStatsForDimensionValues(List.of()); + } + // Duplicated here from EhcacheDiskCacheTests.java, we can't add a dependency on that plugin static class StringSerializer implements Serializer { private final Charset charset = StandardCharsets.UTF_8; diff --git a/modules/lang-painless/src/test/java/org/opensearch/painless/WhenThingsGoWrongTests.java b/modules/lang-painless/src/test/java/org/opensearch/painless/WhenThingsGoWrongTests.java index 0d498e16154c8..3d48e96117a1c 100644 --- a/modules/lang-painless/src/test/java/org/opensearch/painless/WhenThingsGoWrongTests.java +++ b/modules/lang-painless/src/test/java/org/opensearch/painless/WhenThingsGoWrongTests.java @@ -354,6 +354,9 @@ public void testInvalidAssignment() { assertEquals(iae.getMessage(), "invalid assignment: cannot assign a value to addition operation [+]"); iae = expectScriptThrows(IllegalArgumentException.class, () -> exec("Double.x() = 1;")); assertEquals(iae.getMessage(), "invalid assignment: cannot assign a value to method call [x/0]"); + + expectScriptThrows(UnsupportedOperationException.class, () -> exec("params['modifyingParamsMap'] = 2;")); + expectScriptThrows(UnsupportedOperationException.class, () -> exec("params.modifyingParamsMap = 2;")); } public void testCannotResolveSymbol() { diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/17_update_error.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/17_update_error.yml index 3d6db1b781caf..fdbc6de37e3ea 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/17_update_error.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/17_update_error.yml @@ -13,3 +13,50 @@ - match: { error.root_cause.0.position.offset: 13 } - match: { error.root_cause.0.position.start: 0 } - match: { error.root_cause.0.position.end: 38 } + +--- +"Test modifying params map from script leads to exception": + - skip: + features: "node_selector" + + - do: + put_script: + id: "except" + body: {"script": {"lang": "painless", "source": "params.that = 3"}} + + - do: + indices.create: + index: "test" + body: + settings: + index: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + properties: + this: + type: "integer" + that: + type: "integer" + + - do: + index: + index: "test" + id: 1 + body: {"this": 1, "that": 2} + + - do: + catch: /unsupported_operation_exception/ + node_selector: + version: "2.15.0 - " + update: + index: "test" + id: 1 + body: + script: + id: "except" + params: {"this": 2} + + - match: { error.caused_by.position.offset: 6 } + - match: { error.caused_by.position.start: 0 } + - match: { error.caused_by.position.end: 15 } diff --git a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java index f72374d63808a..c5438d58e679d 100644 --- a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -73,7 +73,6 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Objects; import java.util.stream.StreamSupport; import fixture.s3.S3HttpHandler; @@ -206,7 +205,12 @@ public void testRequestStats() throws Exception { } catch (RepositoryMissingException e) { 
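The two `expectScriptThrows` assertions and the YAML test above both pin down the same contract: a Painless script may read `params`, but any write throws `UnsupportedOperationException`. A minimal standalone sketch of that behavior in plain Java, assuming the engine hands scripts an unmodifiable view of the user-supplied map (the wrapper shown here is illustrative, not the actual Painless internals):

```java
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

public class ParamsGuardSketch {
    public static void main(String[] args) {
        Map<String, Object> userParams = new HashMap<>();
        userParams.put("this", 2);

        // Hypothetical equivalent of what the script sees as `params`.
        Map<String, Object> params = Collections.unmodifiableMap(userParams);

        try {
            params.put("that", 3); // mirrors `params.that = 3` in the stored script above
        } catch (UnsupportedOperationException e) {
            System.out.println("write rejected, as the tests assert: " + e);
        }
    }
}
```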
return null; } - }).filter(Objects::nonNull).map(Repository::stats).reduce(RepositoryStats::merge).get(); + }).filter(b -> { + if (b instanceof BlobStoreRepository) { + return ((BlobStoreRepository) b).blobStore() != null; + } + return false; + }).map(Repository::stats).reduce(RepositoryStats::merge).get(); Map> extendedStats = repositoryStats.extendedStats; Map aggregatedStats = new HashMap<>(); diff --git a/release-notes/opensearch.release-notes-1.3.17.md b/release-notes/opensearch.release-notes-1.3.17.md new file mode 100644 index 0000000000000..5218b9e3be20c --- /dev/null +++ b/release-notes/opensearch.release-notes-1.3.17.md @@ -0,0 +1,6 @@ +## 2024-05-30 Version 1.3.17 Release Notes + +### Upgrades +- OpenJDK Update (April 2024 Patch releases), update to Eclipse Temurin 11.0.23+9 ([#13406](https://github.com/opensearch-project/OpenSearch/pull/13406)) +- Upgrade BouncyCastle dependencies from 1.75 to 1.78.1 resolving [CVE-2024-30172], [CVE-2024-30171] and [CVE-2024-29857] +- Bump `netty` from 4.1.109.Final to 4.1.110.Final ([#13802](https://github.com/opensearch-project/OpenSearch/pull/13802)) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/50_multi_match.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/50_multi_match.yml new file mode 100644 index 0000000000000..34acb5985b555 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/50_multi_match.yml @@ -0,0 +1,35 @@ +"Cross fields do not return negative scores": + - skip: + version: " - 2.99.99" + reason: "This fix is in 2.15. Until we do the BWC dance, we need to skip all pre-3.0, though." + - do: + index: + index: test + id: 1 + body: { "color" : "orange red yellow" } + - do: + index: + index: test + id: 2 + body: { "color": "orange red purple", "shape": "red square" } + - do: + index: + index: test + id: 3 + body: { "color" : "orange red yellow purple" } + - do: + indices.refresh: { } + - do: + search: + index: test + body: + query: + multi_match: + query: "red" + type: "cross_fields" + fields: [ "color", "shape^100"] + tie_breaker: 0.1 + explain: true + - match: { hits.total.value: 3 } + - match: { hits.hits.0._id: "2" } + - gt: { hits.hits.2._score: 0.0 } diff --git a/server/build.gradle b/server/build.gradle index 9714f13ec67d6..15301e68fca3d 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -36,7 +36,7 @@ plugins { id('opensearch.publish') id('opensearch.internal-cluster-test') id('opensearch.optional-dependencies') - id('me.champeau.gradle.japicmp') version '0.4.2' + id('me.champeau.gradle.japicmp') version '0.4.3' } publishing { diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManagerIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManagerIT.java index 0f441fe01a368..e96dedaa3e6a0 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManagerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManagerIT.java @@ -93,15 +93,6 @@ public void testRemoteCleanupDeleteStale() throws Exception { initialTestSetup(shardCount, replicaCount, dataNodeCount, clusterManagerNodeCount); - // set cleanup interval to 100 ms to make the test faster - ClusterUpdateSettingsResponse response = client().admin() - .cluster() - .prepareUpdateSettings() - .setPersistentSettings(Settings.builder().put(REMOTE_CLUSTER_STATE_CLEANUP_INTERVAL_SETTING.getKey(), "100ms")) - 
.get(); - - assertTrue(response.isAcknowledged()); - // update cluster state 21 times to ensure that clean up has run after this will upload 42 manifest files // to repository, if manifest files are less than that it means clean up has run updateClusterStateNTimes(RETAINED_MANIFESTS + SKIP_CLEANUP_STATE_CHANGES + 1); @@ -118,6 +109,15 @@ public void testRemoteCleanupDeleteStale() throws Exception { .add(getClusterState().metadata().clusterUUID()); BlobPath manifestContainerPath = baseMetadataPath.add("manifest"); + // set cleanup interval to 100 ms to make the test faster + ClusterUpdateSettingsResponse response = client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put(REMOTE_CLUSTER_STATE_CLEANUP_INTERVAL_SETTING.getKey(), "100ms")) + .get(); + + assertTrue(response.isAcknowledged()); + assertBusy(() -> { int manifestFiles = repository.blobStore().blobContainer(manifestContainerPath).listBlobsByPrefix("manifest").size(); logger.info("number of current manifest file: {}", manifestFiles); @@ -129,6 +129,15 @@ public void testRemoteCleanupDeleteStale() throws Exception { manifestFiles >= RETAINED_MANIFESTS && manifestFiles < RETAINED_MANIFESTS + 2 * SKIP_CLEANUP_STATE_CHANGES ); }, 500, TimeUnit.MILLISECONDS); + + // disable the clean up to avoid race condition during shutdown + response = client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put(REMOTE_CLUSTER_STATE_CLEANUP_INTERVAL_SETTING.getKey(), "-1")) + .get(); + + assertTrue(response.isAcknowledged()); } private void updateClusterStateNTimes(int n) { diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java index de7a52761c77c..0539f96e429c1 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java @@ -12,6 +12,7 @@ import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest; import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; +import org.opensearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest; import org.opensearch.action.admin.indices.stats.CommonStatsFlags; import org.opensearch.action.search.SearchResponse; import org.opensearch.client.Client; @@ -20,7 +21,7 @@ import org.opensearch.common.cache.CacheType; import org.opensearch.common.cache.service.NodeCacheStats; import org.opensearch.common.cache.stats.ImmutableCacheStats; -import org.opensearch.common.cache.stats.ImmutableCacheStatsHolderTests; +import org.opensearch.common.cache.stats.ImmutableCacheStatsHolder; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; @@ -56,6 +57,10 @@ public static Collection parameters() { return Arrays.asList(new Object[] { Settings.builder().put(FeatureFlags.PLUGGABLE_CACHE, "true").build() }); } + /** + * Test aggregating by indices, indices+shards, shards, or no levels, and check the resulting stats + * are as we expect. 
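The tests in this file read aggregated stats through `getStatsForDimensionValues`, whose argument must supply one value per requested aggregation level; an empty list addresses the root node, i.e. the whole-cache totals. A short sketch of that convention, using the `getNodeCacheStatsResult` helper added further down in this diff:

```java
// Aggregate by index name only.
ImmutableCacheStatsHolder byIndex = getNodeCacheStatsResult(client, List.of(IndicesRequestCache.INDEX_DIMENSION_NAME));

// One value per level selects a single leaf, here the stats for "index1".
ImmutableCacheStats index1Stats = byIndex.getStatsForDimensionValues(List.of("index1"));

// An empty dimension list selects the root: totals across the whole cache.
ImmutableCacheStats totals = byIndex.getStatsForDimensionValues(List.of());
```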
+ */ public void testCacheStatsAPIWIthOnHeapCache() throws Exception { String index1Name = "index1"; String index2Name = "index2"; @@ -73,84 +78,60 @@ public void testCacheStatsAPIWIthOnHeapCache() throws Exception { searchIndex(client, index2Name, ""); // First, aggregate by indices only - Map xContentMap = getNodeCacheStatsXContentMap(client, List.of(IndicesRequestCache.INDEX_DIMENSION_NAME)); + ImmutableCacheStatsHolder indicesStats = getNodeCacheStatsResult(client, List.of(IndicesRequestCache.INDEX_DIMENSION_NAME)); - List index1Keys = List.of(CacheType.INDICES_REQUEST_CACHE.getValue(), IndicesRequestCache.INDEX_DIMENSION_NAME, index1Name); + List index1Dimensions = List.of(index1Name); // Since we searched twice, we expect to see 1 hit, 1 miss and 1 entry for index 1 ImmutableCacheStats expectedStats = new ImmutableCacheStats(1, 1, 0, 0, 1); - checkCacheStatsAPIResponse(xContentMap, index1Keys, expectedStats, false, true); + checkCacheStatsAPIResponse(indicesStats, index1Dimensions, expectedStats, false, true); // Get the request size for one request, so we can reuse it for next index - int requestSize = (int) ((Map) ImmutableCacheStatsHolderTests.getValueFromNestedXContentMap( - xContentMap, - index1Keys - )).get(ImmutableCacheStats.Fields.SIZE_IN_BYTES); + long requestSize = indicesStats.getStatsForDimensionValues(List.of(index1Name)).getSizeInBytes(); assertTrue(requestSize > 0); - List index2Keys = List.of(CacheType.INDICES_REQUEST_CACHE.getValue(), IndicesRequestCache.INDEX_DIMENSION_NAME, index2Name); + List index2Dimensions = List.of(index2Name); // We searched once in index 2, we expect 1 miss + 1 entry expectedStats = new ImmutableCacheStats(0, 1, 0, requestSize, 1); - checkCacheStatsAPIResponse(xContentMap, index2Keys, expectedStats, true, true); + checkCacheStatsAPIResponse(indicesStats, index2Dimensions, expectedStats, true, true); // The total stats for the node should be 1 hit, 2 misses, and 2 entries expectedStats = new ImmutableCacheStats(1, 2, 0, 2 * requestSize, 2); - List totalStatsKeys = List.of(CacheType.INDICES_REQUEST_CACHE.getValue()); - checkCacheStatsAPIResponse(xContentMap, totalStatsKeys, expectedStats, true, true); + List totalStatsKeys = List.of(); + checkCacheStatsAPIResponse(indicesStats, totalStatsKeys, expectedStats, true, true); // Aggregate by shards only - xContentMap = getNodeCacheStatsXContentMap(client, List.of(IndicesRequestCache.SHARD_ID_DIMENSION_NAME)); + ImmutableCacheStatsHolder shardsStats = getNodeCacheStatsResult(client, List.of(IndicesRequestCache.SHARD_ID_DIMENSION_NAME)); - List index1Shard0Keys = List.of( - CacheType.INDICES_REQUEST_CACHE.getValue(), - IndicesRequestCache.SHARD_ID_DIMENSION_NAME, - "[" + index1Name + "][0]" - ); + List index1Shard0Dimensions = List.of("[" + index1Name + "][0]"); expectedStats = new ImmutableCacheStats(1, 1, 0, requestSize, 1); - checkCacheStatsAPIResponse(xContentMap, index1Shard0Keys, expectedStats, true, true); + checkCacheStatsAPIResponse(shardsStats, index1Shard0Dimensions, expectedStats, true, true); - List index2Shard0Keys = List.of( - CacheType.INDICES_REQUEST_CACHE.getValue(), - IndicesRequestCache.SHARD_ID_DIMENSION_NAME, - "[" + index2Name + "][0]" - ); + List index2Shard0Dimensions = List.of("[" + index2Name + "][0]"); expectedStats = new ImmutableCacheStats(0, 1, 0, requestSize, 1); - checkCacheStatsAPIResponse(xContentMap, index2Shard0Keys, expectedStats, true, true); + checkCacheStatsAPIResponse(shardsStats, index2Shard0Dimensions, expectedStats, true, true); // Aggregate by indices 
and shards - xContentMap = getNodeCacheStatsXContentMap( + ImmutableCacheStatsHolder indicesAndShardsStats = getNodeCacheStatsResult( client, List.of(IndicesRequestCache.INDEX_DIMENSION_NAME, IndicesRequestCache.SHARD_ID_DIMENSION_NAME) ); - index1Keys = List.of( - CacheType.INDICES_REQUEST_CACHE.getValue(), - IndicesRequestCache.INDEX_DIMENSION_NAME, - index1Name, - IndicesRequestCache.SHARD_ID_DIMENSION_NAME, - "[" + index1Name + "][0]" - ); + index1Dimensions = List.of(index1Name, "[" + index1Name + "][0]"); expectedStats = new ImmutableCacheStats(1, 1, 0, requestSize, 1); - checkCacheStatsAPIResponse(xContentMap, index1Keys, expectedStats, true, true); - - index2Keys = List.of( - CacheType.INDICES_REQUEST_CACHE.getValue(), - IndicesRequestCache.INDEX_DIMENSION_NAME, - index2Name, - IndicesRequestCache.SHARD_ID_DIMENSION_NAME, - "[" + index2Name + "][0]" - ); + checkCacheStatsAPIResponse(indicesAndShardsStats, index1Dimensions, expectedStats, true, true); + index2Dimensions = List.of(index2Name, "[" + index2Name + "][0]"); expectedStats = new ImmutableCacheStats(0, 1, 0, requestSize, 1); - checkCacheStatsAPIResponse(xContentMap, index2Keys, expectedStats, true, true); - + checkCacheStatsAPIResponse(indicesAndShardsStats, index2Dimensions, expectedStats, true, true); } - // TODO: Add testCacheStatsAPIWithTieredCache when TSC stats implementation PR is merged - + /** + * Check the new stats API returns the same values as the old stats API. In particular, + * check that the new and old APIs are both correctly estimating memory size, + * using the logic that includes the overhead memory in ICacheKey. + */ public void testStatsMatchOldApi() throws Exception { - // The main purpose of this test is to check that the new and old APIs are both correctly estimating memory size, - // using the logic that includes the overhead memory in ICacheKey. String index = "index"; Client client = client(); startIndex(client, index); @@ -173,8 +154,7 @@ public void testStatsMatchOldApi() throws Exception { .getRequestCache(); assertNotEquals(0, oldApiStats.getMemorySizeInBytes()); - List xContentMapKeys = List.of(CacheType.INDICES_REQUEST_CACHE.getValue()); - Map xContentMap = getNodeCacheStatsXContentMap(client, List.of()); + ImmutableCacheStatsHolder statsHolder = getNodeCacheStatsResult(client, List.of()); ImmutableCacheStats expected = new ImmutableCacheStats( oldApiStats.getHitCount(), oldApiStats.getMissCount(), @@ -183,9 +163,13 @@ public void testStatsMatchOldApi() throws Exception { 0 ); // Don't check entries, as the old API doesn't track this - checkCacheStatsAPIResponse(xContentMap, xContentMapKeys, expected, true, false); + checkCacheStatsAPIResponse(statsHolder, List.of(), expected, true, false); } + /** + * Test the XContent in the response behaves correctly when we pass null levels. + * Only the total cache stats should be returned. + */ public void testNullLevels() throws Exception { String index = "index"; Client client = client(); @@ -194,9 +178,81 @@ public void testNullLevels() throws Exception { for (int i = 0; i < numKeys; i++) { searchIndex(client, index, String.valueOf(i)); } - Map xContentMap = getNodeCacheStatsXContentMap(client, null); + Map xContentMap = getStatsXContent(getNodeCacheStatsResult(client, null)); // Null levels should result in only the total cache stats being returned -> 6 fields inside the response. 
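For reference, the XContent round-trip these assertions depend on looks like the following (the same calls appear in the `getStatsXContent` helper later in this file; shown here in isolation):

```java
XContentBuilder builder = XContentFactory.jsonBuilder();
builder.startObject();
// Serializes whatever aggregation the holder was built with; a null-levels holder yields only totals.
statsHolder.toXContent(builder, new ToXContent.MapParams(new HashMap<>()));
builder.endObject();
Map<String, Object> xContentMap = XContentHelper.convertToMap(MediaTypeRegistry.JSON.xContent(), builder.toString(), true);
```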
- assertEquals(6, ((Map) xContentMap.get("request_cache")).size()); + assertEquals(6, xContentMap.size()); + } + + /** + * Test clearing the cache using API sets memory size and number of items to 0, but leaves other stats + * unaffected. + */ + public void testCacheClear() throws Exception { + String index = "index"; + Client client = client(); + + startIndex(client, index); + + int expectedHits = 2; + int expectedMisses = 7; + // Search for the same doc to give hits + for (int i = 0; i < expectedHits + 1; i++) { + searchIndex(client, index, ""); + } + // Search for new docs + for (int i = 0; i < expectedMisses - 1; i++) { + searchIndex(client, index, String.valueOf(i)); + } + + ImmutableCacheStats expectedTotal = new ImmutableCacheStats(expectedHits, expectedMisses, 0, 0, expectedMisses); + ImmutableCacheStatsHolder statsHolder = getNodeCacheStatsResult(client, List.of()); + // Don't check the memory size, just assert it's nonzero + checkCacheStatsAPIResponse(statsHolder, List.of(), expectedTotal, false, true); + long originalMemorySize = statsHolder.getTotalSizeInBytes(); + assertNotEquals(0, originalMemorySize); + + // Clear cache + ClearIndicesCacheRequest clearIndicesCacheRequest = new ClearIndicesCacheRequest(index); + client.admin().indices().clearCache(clearIndicesCacheRequest).actionGet(); + + // Now size and items should be 0 + expectedTotal = new ImmutableCacheStats(expectedHits, expectedMisses, 0, 0, 0); + statsHolder = getNodeCacheStatsResult(client, List.of()); + checkCacheStatsAPIResponse(statsHolder, List.of(), expectedTotal, true, true); + } + + /** + * Test the cache stats responses are in the expected place in XContent when we call the overall API + * GET /_nodes/stats. They should be at nodes.[node_id].caches.request_cache. + */ + public void testNodesStatsResponse() throws Exception { + String index = "index"; + Client client = client(); + + startIndex(client, index); + + NodesStatsResponse nodeStatsResponse = client.admin() + .cluster() + .prepareNodesStats("data:true") + .all() // This mimics /_nodes/stats + .get(); + XContentBuilder builder = XContentFactory.jsonBuilder(); + Map paramMap = new HashMap<>(); + ToXContent.Params params = new ToXContent.MapParams(paramMap); + + builder.startObject(); + nodeStatsResponse.toXContent(builder, params); + builder.endObject(); + Map xContentMap = XContentHelper.convertToMap(MediaTypeRegistry.JSON.xContent(), builder.toString(), true); + // Values should be at nodes.[node_id].caches.request_cache + // Get the node id + Map nodesResponse = (Map) xContentMap.get("nodes"); + assertEquals(1, nodesResponse.size()); + String nodeId = nodesResponse.keySet().toArray(String[]::new)[0]; + Map cachesResponse = (Map) ((Map) nodesResponse.get(nodeId)).get("caches"); + assertNotNull(cachesResponse); + // Request cache should be present in the response + assertTrue(cachesResponse.containsKey("request_cache")); } private void startIndex(Client client, String indexName) throws InterruptedException { @@ -227,8 +283,7 @@ private SearchResponse searchIndex(Client client, String index, String searchSuf return resp; } - private static Map getNodeCacheStatsXContentMap(Client client, List aggregationLevels) throws IOException { - + private static ImmutableCacheStatsHolder getNodeCacheStatsResult(Client client, List aggregationLevels) throws IOException { CommonStatsFlags statsFlags = new CommonStatsFlags(); statsFlags.includeAllCacheTypes(); String[] flagsLevels; @@ -248,16 +303,16 @@ private static Map getNodeCacheStatsXContentMap(Client client, L 
// Can always get the first data node as there's only one in this test suite assertEquals(1, nodeStatsResponse.getNodes().size()); NodeCacheStats ncs = nodeStatsResponse.getNodes().get(0).getNodeCacheStats(); + return ncs.getStatsByCache(CacheType.INDICES_REQUEST_CACHE); + } + private static Map<String, Object> getStatsXContent(ImmutableCacheStatsHolder statsHolder) throws IOException { XContentBuilder builder = XContentFactory.jsonBuilder(); Map<String, String> paramMap = new HashMap<>(); - if (aggregationLevels != null && !aggregationLevels.isEmpty()) { - paramMap.put("level", String.join(",", aggregationLevels)); - } ToXContent.Params params = new ToXContent.MapParams(paramMap); builder.startObject(); - ncs.toXContent(builder, params); + statsHolder.toXContent(builder, params); builder.endObject(); String resultString = builder.toString(); @@ -265,27 +320,22 @@ private static Map<String, Object> getNodeCacheStatsXContentMap(Client client, L } private static void checkCacheStatsAPIResponse( - Map<String, Object> xContentMap, - List<String> xContentMapKeys, + ImmutableCacheStatsHolder statsHolder, + List<String> dimensionValues, ImmutableCacheStats expectedStats, boolean checkMemorySize, boolean checkEntries ) { - // Assumes the keys point to a level whose keys are the field values ("size_in_bytes", "evictions", etc) and whose values store - // those stats - Map<String, Object> aggregatedStatsResponse = (Map<String, Object>) ImmutableCacheStatsHolderTests.getValueFromNestedXContentMap( - xContentMap, - xContentMapKeys - ); + ImmutableCacheStats aggregatedStatsResponse = statsHolder.getStatsForDimensionValues(dimensionValues); assertNotNull(aggregatedStatsResponse); - assertEquals(expectedStats.getHits(), (int) aggregatedStatsResponse.get(ImmutableCacheStats.Fields.HIT_COUNT)); - assertEquals(expectedStats.getMisses(), (int) aggregatedStatsResponse.get(ImmutableCacheStats.Fields.MISS_COUNT)); - assertEquals(expectedStats.getEvictions(), (int) aggregatedStatsResponse.get(ImmutableCacheStats.Fields.EVICTIONS)); + assertEquals(expectedStats.getHits(), (int) aggregatedStatsResponse.getHits()); + assertEquals(expectedStats.getMisses(), (int) aggregatedStatsResponse.getMisses()); + assertEquals(expectedStats.getEvictions(), (int) aggregatedStatsResponse.getEvictions()); if (checkMemorySize) { - assertEquals(expectedStats.getSizeInBytes(), (int) aggregatedStatsResponse.get(ImmutableCacheStats.Fields.SIZE_IN_BYTES)); + assertEquals(expectedStats.getSizeInBytes(), (int) aggregatedStatsResponse.getSizeInBytes()); } if (checkEntries) { - assertEquals(expectedStats.getItems(), (int) aggregatedStatsResponse.get(ImmutableCacheStats.Fields.ITEM_COUNT)); + assertEquals(expectedStats.getItems(), (int) aggregatedStatsResponse.getItems()); } } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java index 611dfc2756b29..0493bcf800c97 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java @@ -35,6 +35,7 @@ import java.util.concurrent.atomic.AtomicLong; import static org.opensearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING; +import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING; import static org.opensearch.node.remotestore.RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING; import static 
org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; import static org.opensearch.repositories.fs.ReloadableFsRepository.REPOSITORIES_FAILRATE_SETTING; @@ -78,10 +79,11 @@ protected Settings nodeSettings(int nodeOrdinal) { .put(super.nodeSettings(nodeOrdinal)) .put(extraSettings) .put(remoteStoreClusterSettings(REPOSITORY_NAME, segmentRepoPath, REPOSITORY_2_NAME, translogRepoPath)) + .put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true) .build(); } else { logger.info("Adding docrep node"); - return Settings.builder().put(super.nodeSettings(nodeOrdinal)).build(); + return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true).build(); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteMigrationIndexMetadataUpdateIT.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteMigrationIndexMetadataUpdateIT.java index 53f4ef3fe281f..c72b6851c1125 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteMigrationIndexMetadataUpdateIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteMigrationIndexMetadataUpdateIT.java @@ -15,14 +15,21 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.allocation.command.MoveAllocationCommand; import org.opensearch.common.settings.Settings; +import org.opensearch.core.util.FileSystemUtils; +import org.opensearch.index.remote.RemoteIndexPath; +import org.opensearch.index.remote.RemoteIndexPathUploader; +import org.opensearch.index.remote.RemoteStoreEnums; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; +import java.nio.file.Path; +import java.util.Arrays; import java.util.List; import java.util.function.Function; import java.util.stream.Collectors; +import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) @@ -454,6 +461,85 @@ public void testRemotePathMetadataAddedWithFirstPrimaryMovingToRemote() throws E assertRemoteProperties(indexName); } + /** + * Scenario: + * creates an index on docrep node with non-remote cluster-manager. + * make the cluster mixed, add remote cluster-manager and data nodes. + *

+ * exclude docrep nodes, assert that remote index path file exists + * when shards start relocating to the remote nodes. + */ + public void testRemoteIndexPathFileExistsAfterMigration() throws Exception { + String docrepClusterManager = internalCluster().startClusterManagerOnlyNode(); + + logger.info("---> Starting 2 docrep nodes"); + addRemote = false; + internalCluster().startDataOnlyNodes(2, Settings.builder().put("node.attr._type", "docrep").build()); + internalCluster().validateClusterFormed(); + + logger.info("---> Creating index with 1 primary and 1 replica"); + String indexName = "migration-index"; + Settings oneReplica = Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .build(); + createIndexAndAssertDocrepProperties(indexName, oneReplica); + + String indexUUID = internalCluster().client() + .admin() + .indices() + .prepareGetSettings(indexName) + .get() + .getSetting(indexName, IndexMetadata.SETTING_INDEX_UUID); + + logger.info("---> Starting indexing in parallel"); + AsyncIndexingService indexingService = new AsyncIndexingService(indexName); + indexingService.startIndexing(); + + logger.info("---> Adding 2 remote enabled nodes to the cluster & cluster manager"); + initDocRepToRemoteMigration(); + addRemote = true; + internalCluster().startClusterManagerOnlyNode(); + internalCluster().startDataOnlyNodes(2, Settings.builder().put("node.attr._type", "remote").build()); + internalCluster().validateClusterFormed(); + + assertTrue( + internalCluster().client() + .admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings( + Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), RemoteStoreEnums.PathType.HASHED_PREFIX) + ) + .get() + .isAcknowledged() + ); + + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(docrepClusterManager)); + internalCluster().validateClusterFormed(); + + logger.info("---> Excluding docrep nodes from allocation"); + excludeNodeSet("type", "docrep"); + + waitForRelocation(); + waitNoPendingTasksOnAll(); + indexingService.stopIndexing(); + + // validate remote index path file exists + logger.info("---> Asserting remote index path file exists"); + String fileNamePrefix = String.join(RemoteIndexPathUploader.DELIMITER, indexUUID, "7", RemoteIndexPath.DEFAULT_VERSION); + + assertTrue(FileSystemUtils.exists(translogRepoPath.resolve(RemoteIndexPath.DIR))); + Path[] files = FileSystemUtils.files(translogRepoPath.resolve(RemoteIndexPath.DIR)); + assertEquals(1, files.length); + assertTrue(Arrays.stream(files).anyMatch(file -> file.toString().contains(fileNamePrefix))); + + assertTrue(FileSystemUtils.exists(segmentRepoPath.resolve(RemoteIndexPath.DIR))); + files = FileSystemUtils.files(segmentRepoPath.resolve(RemoteIndexPath.DIR)); + assertEquals(1, files.length); + assertTrue(Arrays.stream(files).anyMatch(file -> file.toString().contains(fileNamePrefix))); + } + private void createIndexAndAssertDocrepProperties(String index, Settings settings) { createIndexAssertHealthAndDocrepProperties(index, settings, this::ensureGreen); } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java index a58db51780826..01ad06757640c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java @@ -1914,14 +1914,8 @@ public 
void testRangeQueryWithTimeZone() throws Exception { * Test range with a custom locale, e.g. "de" in this case. Documents here mention the day of week * as "Mi" for "Mittwoch (Wednesday" and "Do" for "Donnerstag (Thursday)" and the month in the query * as "Dez" for "Dezember (December)". - * Note: this test currently needs the JVM arg `-Djava.locale.providers=SPI,COMPAT` to be set. - * When running with gradle this is done implicitly through the BuildPlugin, but when running from - * an IDE this might need to be set manually in the run configuration. See also CONTRIBUTING.md section - * on "Configuring IDEs And Running Tests". */ public void testRangeQueryWithLocaleMapping() throws Exception { - assert ("SPI,COMPAT".equals(System.getProperty("java.locale.providers"))) : "`-Djava.locale.providers=SPI,COMPAT` needs to be set"; - assertAcked( prepareCreate("test").setMapping( jsonBuilder().startObject() @@ -1938,17 +1932,21 @@ public void testRangeQueryWithLocaleMapping() throws Exception { indexRandom( true, - client().prepareIndex("test").setId("1").setSource("date_field", "Mi, 06 Dez 2000 02:55:00 -0800"), - client().prepareIndex("test").setId("2").setSource("date_field", "Do, 07 Dez 2000 02:55:00 -0800") + client().prepareIndex("test").setId("1").setSource("date_field", "Mi., 06 Dez. 2000 02:55:00 -0800"), + client().prepareIndex("test").setId("2").setSource("date_field", "Do., 07 Dez. 2000 02:55:00 -0800") ); SearchResponse searchResponse = client().prepareSearch("test") - .setQuery(QueryBuilders.rangeQuery("date_field").gte("Di, 05 Dez 2000 02:55:00 -0800").lte("Do, 07 Dez 2000 00:00:00 -0800")) + .setQuery( + QueryBuilders.rangeQuery("date_field").gte("Di., 05 Dez. 2000 02:55:00 -0800").lte("Do., 07 Dez. 2000 00:00:00 -0800") + ) .get(); assertHitCount(searchResponse, 1L); searchResponse = client().prepareSearch("test") - .setQuery(QueryBuilders.rangeQuery("date_field").gte("Di, 05 Dez 2000 02:55:00 -0800").lte("Fr, 08 Dez 2000 00:00:00 -0800")) + .setQuery( + QueryBuilders.rangeQuery("date_field").gte("Di., 05 Dez. 2000 02:55:00 -0800").lte("Fr., 08 Dez. 
2000 00:00:00 -0800") + ) .get(); assertHitCount(searchResponse, 2L); } diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java index 90bb2b501764e..b41dd99ff6d40 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java @@ -47,6 +47,7 @@ import org.opensearch.node.Node; import org.opensearch.repositories.fs.FsRepository; import org.hamcrest.MatcherAssert; +import org.junit.After; import java.io.IOException; import java.nio.file.Files; @@ -62,6 +63,10 @@ import static org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest.Metric.FS; import static org.opensearch.core.common.util.CollectionUtils.iterableAsArrayList; +import static org.opensearch.index.store.remote.filecache.FileCacheSettings.DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING; +import static org.opensearch.test.NodeRoles.clusterManagerOnlyNode; +import static org.opensearch.test.NodeRoles.dataNode; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -939,6 +944,52 @@ public void testRelocateSearchableSnapshotIndex() throws Exception { assertSearchableSnapshotIndexDirectoryExistence(searchNode2, index, false); } + public void testCreateSearchableSnapshotWithSpecifiedRemoteDataRatio() throws Exception { + final String snapshotName = "test-snap"; + final String repoName = "test-repo"; + final String indexName1 = "test-idx-1"; + final String restoredIndexName1 = indexName1 + "-copy"; + final String indexName2 = "test-idx-2"; + final String restoredIndexName2 = indexName2 + "-copy"; + final int numReplicasIndex1 = 1; + final int numReplicasIndex2 = 1; + + Settings clusterManagerNodeSettings = clusterManagerOnlyNode(); + internalCluster().startNodes(2, clusterManagerNodeSettings); + Settings dateNodeSettings = dataNode(); + internalCluster().startNodes(2, dateNodeSettings); + createIndexWithDocsAndEnsureGreen(numReplicasIndex1, 100, indexName1); + createIndexWithDocsAndEnsureGreen(numReplicasIndex2, 100, indexName2); + + final Client client = client(); + assertAcked( + client.admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING.getKey(), 5)) + ); + + createRepositoryWithSettings(null, repoName); + takeSnapshot(client, snapshotName, repoName, indexName1, indexName2); + + internalCluster().ensureAtLeastNumSearchNodes(Math.max(numReplicasIndex1, numReplicasIndex2) + 1); + restoreSnapshotAndEnsureGreen(client, snapshotName, repoName); + + assertDocCount(restoredIndexName1, 100L); + assertDocCount(restoredIndexName2, 100L); + assertIndexDirectoryDoesNotExist(restoredIndexName1, restoredIndexName2); + } + + @After + public void cleanup() throws Exception { + assertAcked( + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().putNull(DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING.getKey())) + ); + } + private void assertSearchableSnapshotIndexDirectoryExistence(String nodeName, Index index, boolean exists) throws Exception { final Node node = internalCluster().getInstance(Node.class, nodeName); final ShardId shardId = new ShardId(index, 0); diff --git 
a/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java b/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java index b47b974b96fed..34e1e210d7137 100644 --- a/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java +++ b/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java @@ -120,6 +120,7 @@ protected void blend(final TermStates[] contexts, int maxDoc, IndexReader reader } int max = 0; long minSumTTF = Long.MAX_VALUE; + int[] docCounts = new int[contexts.length]; for (int i = 0; i < contexts.length; i++) { TermStates ctx = contexts[i]; int df = ctx.docFreq(); @@ -133,6 +134,7 @@ protected void blend(final TermStates[] contexts, int maxDoc, IndexReader reader // we need to find out the minimum sumTTF to adjust the statistics // otherwise the statistics don't match minSumTTF = Math.min(minSumTTF, reader.getSumTotalTermFreq(terms[i].field())); + docCounts[i] = reader.getDocCount(terms[i].field()); } } if (maxDoc > minSumTTF) { @@ -175,7 +177,11 @@ protected int compare(int i, int j) { if (prev > current) { actualDf++; } - contexts[i] = ctx = adjustDF(reader.getContext(), ctx, Math.min(maxDoc, actualDf)); + // Per field, we want to guarantee that the adjusted df does not exceed the number of docs with the field. + // That is, in the IDF formula (log(1 + (N - n + 0.5) / (n + 0.5))), we need to make sure that n (the + // adjusted df) is never bigger than N (the number of docs with the field). + int fieldMaxDoc = Math.min(maxDoc, docCounts[i]); + contexts[i] = ctx = adjustDF(reader.getContext(), ctx, Math.min(fieldMaxDoc, actualDf)); prev = current; sumTTF += ctx.totalTermFreq(); } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java index 9265c6ae60678..09cceca52ce23 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java @@ -50,7 +50,6 @@ import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.index.Index; -import org.opensearch.index.IndexModule; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ -59,8 +58,6 @@ import java.util.Set; import java.util.stream.Stream; -import static org.opensearch.index.IndexModule.INDEX_STORE_TYPE_SETTING; - /** * Transport action for updating index settings * @@ -133,9 +130,7 @@ protected ClusterBlockException checkBlock(UpdateSettingsRequest request, Cluste for (Index index : requestIndices) { if (state.blocks().indexBlocked(ClusterBlockLevel.METADATA_WRITE, index.getName())) { allowSearchableSnapshotSettingsUpdate = allowSearchableSnapshotSettingsUpdate - && IndexModule.Type.REMOTE_SNAPSHOT.match( - state.getMetadata().getIndexSafe(index).getSettings().get(INDEX_STORE_TYPE_SETTING.getKey()) - ); + && state.getMetadata().getIndexSafe(index).isRemoteSnapshot(); } } // check if all settings in the request are in the allow list diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java index 4d108f8d78a69..ca2685e093d3f 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java +++ 
b/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java @@ -138,7 +138,7 @@ public CommonStatsFlags all() { includeUnloadedSegments = false; includeAllShardIndexingPressureTrackers = false; includeOnlyTopIndexingPressureMetrics = false; - includeCaches = EnumSet.noneOf(CacheType.class); + includeCaches = EnumSet.allOf(CacheType.class); levels = new String[0]; return this; } diff --git a/server/src/main/java/org/opensearch/cluster/ClusterManagerMetrics.java b/server/src/main/java/org/opensearch/cluster/ClusterManagerMetrics.java index d48f82a388245..a98349a4af5cd 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterManagerMetrics.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterManagerMetrics.java @@ -8,6 +8,7 @@ package org.opensearch.cluster; +import org.opensearch.telemetry.metrics.Counter; import org.opensearch.telemetry.metrics.Histogram; import org.opensearch.telemetry.metrics.MetricsRegistry; import org.opensearch.telemetry.metrics.tags.Tags; @@ -23,6 +24,7 @@ public final class ClusterManagerMetrics { private static final String LATENCY_METRIC_UNIT_MS = "ms"; + private static final String COUNTER_METRICS_UNIT = "1"; public final Histogram clusterStateAppliersHistogram; public final Histogram clusterStateListenersHistogram; @@ -30,6 +32,9 @@ public final class ClusterManagerMetrics { public final Histogram clusterStateComputeHistogram; public final Histogram clusterStatePublishHistogram; + public final Counter leaderCheckFailureCounter; + public final Counter followerChecksFailureCounter; + public ClusterManagerMetrics(MetricsRegistry metricsRegistry) { clusterStateAppliersHistogram = metricsRegistry.createHistogram( "cluster.state.appliers.latency", @@ -56,6 +61,16 @@ public ClusterManagerMetrics(MetricsRegistry metricsRegistry) { "Histogram for recording time taken to publish a new cluster state", LATENCY_METRIC_UNIT_MS ); + followerChecksFailureCounter = metricsRegistry.createCounter( + "followers.checker.failure.count", + "Counter for number of failed follower checks", + COUNTER_METRICS_UNIT + ); + leaderCheckFailureCounter = metricsRegistry.createCounter( + "leader.checker.failure.count", + "Counter for number of failed leader checks", + COUNTER_METRICS_UNIT + ); } public void recordLatency(Histogram histogram, Double value) { @@ -69,4 +84,16 @@ public void recordLatency(Histogram histogram, Double value, Optional tags } histogram.record(value, tags.get()); } + + public void incrementCounter(Counter counter, Double value) { + incrementCounter(counter, value, Optional.empty()); + } + + public void incrementCounter(Counter counter, Double value, Optional tags) { + if (Objects.isNull(tags) || tags.isEmpty()) { + counter.add(value); + return; + } + counter.add(value, tags.get()); + } } diff --git a/server/src/main/java/org/opensearch/cluster/block/ClusterBlocks.java b/server/src/main/java/org/opensearch/cluster/block/ClusterBlocks.java index 304136166d515..02a20b7681ba7 100644 --- a/server/src/main/java/org/opensearch/cluster/block/ClusterBlocks.java +++ b/server/src/main/java/org/opensearch/cluster/block/ClusterBlocks.java @@ -42,7 +42,6 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.rest.RestStatus; -import org.opensearch.index.IndexModule; import java.io.IOException; import java.util.Collections; @@ -399,7 +398,7 @@ public Builder addBlocks(IndexMetadata indexMetadata) { if 
(IndexMetadata.INDEX_BLOCKS_READ_ONLY_ALLOW_DELETE_SETTING.get(indexMetadata.getSettings())) { addIndexBlock(indexName, IndexMetadata.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK); } - if (IndexModule.Type.REMOTE_SNAPSHOT.match(indexMetadata.getSettings().get(IndexModule.INDEX_STORE_TYPE_SETTING.getKey()))) { + if (indexMetadata.isRemoteSnapshot()) { addIndexBlock(indexName, IndexMetadata.REMOTE_READ_ONLY_ALLOW_DELETE); } return this; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java index 3d74feddfa261..f53e6837a67f5 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java @@ -36,6 +36,7 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.cluster.ClusterChangedEvent; +import org.opensearch.cluster.ClusterManagerMetrics; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateTaskConfig; @@ -207,7 +208,8 @@ public Coordinator( ElectionStrategy electionStrategy, NodeHealthService nodeHealthService, PersistedStateRegistry persistedStateRegistry, - RemoteStoreNodeService remoteStoreNodeService + RemoteStoreNodeService remoteStoreNodeService, + ClusterManagerMetrics clusterManagerMetrics ) { this.settings = settings; this.transportService = transportService; @@ -261,14 +263,22 @@ public Coordinator( this::handlePublishRequest, this::handleApplyCommit ); - this.leaderChecker = new LeaderChecker(settings, clusterSettings, transportService, this::onLeaderFailure, nodeHealthService); + this.leaderChecker = new LeaderChecker( + settings, + clusterSettings, + transportService, + this::onLeaderFailure, + nodeHealthService, + clusterManagerMetrics + ); this.followersChecker = new FollowersChecker( settings, clusterSettings, transportService, this::onFollowerCheckRequest, this::removeNode, - nodeHealthService + nodeHealthService, + clusterManagerMetrics ); this.nodeRemovalExecutor = new NodeRemovalClusterStateTaskExecutor(allocationService, logger); this.clusterApplier = clusterApplier; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/FollowersChecker.java b/server/src/main/java/org/opensearch/cluster/coordination/FollowersChecker.java index 70bb0515bb022..2ec0dabd91786 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/FollowersChecker.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/FollowersChecker.java @@ -35,6 +35,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.cluster.ClusterManagerMetrics; import org.opensearch.cluster.coordination.Coordinator.Mode; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; @@ -127,6 +128,7 @@ public class FollowersChecker { private final TransportService transportService; private final NodeHealthService nodeHealthService; private volatile FastResponseState fastResponseState; + private ClusterManagerMetrics clusterManagerMetrics; public FollowersChecker( Settings settings, @@ -134,7 +136,8 @@ public FollowersChecker( TransportService transportService, Consumer handleRequestAndUpdateState, BiConsumer onNodeFailure, - NodeHealthService nodeHealthService + NodeHealthService 
nodeHealthService, + ClusterManagerMetrics clusterManagerMetrics ) { this.settings = settings; this.transportService = transportService; @@ -161,6 +164,7 @@ public void onNodeDisconnected(DiscoveryNode node, Transport.Connection connecti handleDisconnectedNode(node); } }); + this.clusterManagerMetrics = clusterManagerMetrics; } private void setFollowerCheckTimeout(TimeValue followerCheckTimeout) { @@ -413,6 +417,7 @@ public String executor() { } void failNode(String reason) { + clusterManagerMetrics.incrementCounter(clusterManagerMetrics.followerChecksFailureCounter, 1.0); transportService.getThreadPool().generic().execute(new Runnable() { @Override public void run() { diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java index 5475470b81b93..f77a7ffc8ce8e 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java @@ -511,11 +511,27 @@ private static void ensureRemoteStoreNodesCompatibility(DiscoveryNode joiningNod assert existingNodes.isEmpty() == false; CompatibilityMode remoteStoreCompatibilityMode = REMOTE_STORE_COMPATIBILITY_MODE_SETTING.get(metadata.settings()); - if (STRICT.equals(remoteStoreCompatibilityMode)) { - DiscoveryNode existingNode = existingNodes.get(0); + List<String> reposToSkip = new ArrayList<>(1); + Optional<DiscoveryNode> remoteRoutingTableNode = existingNodes.stream() + .filter( + node -> node.getAttributes().get(RemoteStoreNodeAttribute.REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY) != null + ) + .findFirst(); + // If none of the existing nodes have routing table repo, then we skip this repo check if present in joining node. + // This ensures a new node with remote routing table repo is able to join the cluster. + if (remoteRoutingTableNode.isEmpty()) { + String joiningNodeRepoName = joiningNode.getAttributes() + .get(RemoteStoreNodeAttribute.REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY); + if (joiningNodeRepoName != null) { + reposToSkip.add(joiningNodeRepoName); + } + } + + if (STRICT.equals(remoteStoreCompatibilityMode)) { + DiscoveryNode existingNode = remoteRoutingTableNode.orElseGet(() -> existingNodes.get(0)); if (joiningNode.isRemoteStoreNode()) { - ensureRemoteStoreNodesCompatibility(joiningNode, existingNode); + ensureRemoteStoreNodesCompatibility(joiningNode, existingNode, reposToSkip); } else { if (existingNode.isRemoteStoreNode()) { throw new IllegalStateException( @@ -537,19 +553,25 @@ private static void ensureRemoteStoreNodesCompatibility(DiscoveryNod throw new IllegalStateException(reason); } if (joiningNode.isRemoteStoreNode()) { - Optional<DiscoveryNode> remoteDN = existingNodes.stream().filter(DiscoveryNode::isRemoteStoreNode).findFirst(); - remoteDN.ifPresent(discoveryNode -> ensureRemoteStoreNodesCompatibility(joiningNode, discoveryNode)); + Optional<DiscoveryNode> remoteDN = remoteRoutingTableNode.isPresent() + ? remoteRoutingTableNode + : existingNodes.stream().filter(DiscoveryNode::isRemoteStoreNode).findFirst(); + remoteDN.ifPresent(discoveryNode -> ensureRemoteStoreNodesCompatibility(joiningNode, discoveryNode, reposToSkip)); } } } } - private static void ensureRemoteStoreNodesCompatibility(DiscoveryNode joiningNode, DiscoveryNode existingNode) { + private static void ensureRemoteStoreNodesCompatibility( + DiscoveryNode joiningNode, + DiscoveryNode existingNode, + List<String> reposToSkip + ) { if (joiningNode.isRemoteStoreNode()) { if (existingNode.isRemoteStoreNode()) { RemoteStoreNodeAttribute joiningRemoteStoreNodeAttribute = new RemoteStoreNodeAttribute(joiningNode); RemoteStoreNodeAttribute existingRemoteStoreNodeAttribute = new RemoteStoreNodeAttribute(existingNode); - if (existingRemoteStoreNodeAttribute.equals(joiningRemoteStoreNodeAttribute) == false) { + if (existingRemoteStoreNodeAttribute.equalsWithRepoSkip(joiningRemoteStoreNodeAttribute, reposToSkip) == false) { throw new IllegalStateException( "a remote store node [" + joiningNode diff --git a/server/src/main/java/org/opensearch/cluster/coordination/LeaderChecker.java b/server/src/main/java/org/opensearch/cluster/coordination/LeaderChecker.java index 8d4373b865f62..4fd2c0eb13073 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/LeaderChecker.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/LeaderChecker.java @@ -36,6 +36,7 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.OpenSearchException; +import org.opensearch.cluster.ClusterManagerMetrics; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.common.Nullable; @@ -119,17 +120,17 @@ public class LeaderChecker { private final TransportService transportService; private final Consumer<Exception> onLeaderFailure; private final NodeHealthService nodeHealthService; - private AtomicReference<CheckScheduler> currentChecker = new AtomicReference<>(); - private volatile DiscoveryNodes discoveryNodes; + private final ClusterManagerMetrics clusterManagerMetrics; LeaderChecker( final Settings settings, final ClusterSettings clusterSettings, final TransportService transportService, final Consumer<Exception> onLeaderFailure, - NodeHealthService nodeHealthService + NodeHealthService nodeHealthService, + final ClusterManagerMetrics clusterManagerMetrics ) { this.settings = settings; leaderCheckInterval = LEADER_CHECK_INTERVAL_SETTING.get(settings); @@ -138,6 +139,7 @@ public class LeaderChecker { this.transportService = transportService; this.onLeaderFailure = onLeaderFailure; this.nodeHealthService = nodeHealthService; + this.clusterManagerMetrics = clusterManagerMetrics; clusterSettings.addSettingsUpdateConsumer(LEADER_CHECK_TIMEOUT_SETTING, this::setLeaderCheckTimeout); transportService.registerRequestHandler( @@ -293,7 +295,6 @@ public void handleResponse(Empty response) { logger.debug("closed check scheduler received a response, doing nothing"); return; } - failureCountSinceLastSuccess.set(0); scheduleNextWakeUp(); // logs trace message indicating success } @@ -304,7 +305,6 @@ public void handleException(TransportException exp) { logger.debug("closed check scheduler received a response, doing nothing"); return; } - if (exp instanceof ConnectTransportException || exp.getCause() instanceof ConnectTransportException) { logger.debug(new ParameterizedMessage("leader [{}] disconnected during check", leader), exp); leaderFailed(new 
ConnectTransportException(leader, "disconnected during check", exp)); @@ -355,6 +355,7 @@ public String executor() { void leaderFailed(Exception e) { if (isClosed.compareAndSet(false, true)) { + clusterManagerMetrics.incrementCounter(clusterManagerMetrics.leaderCheckFailureCounter, 1.0); transportService.getThreadPool().generic().execute(new Runnable() { @Override public void run() { diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index 8d2851c2e0561..9e7fe23f29872 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -65,6 +65,7 @@ import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.gateway.MetadataStateFormat; +import org.opensearch.index.IndexModule; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.indices.replication.common.ReplicationType; @@ -683,6 +684,7 @@ public static APIBlock readFrom(StreamInput input) throws IOException { private final ActiveShardCount waitForActiveShards; private final Map rolloverInfos; private final boolean isSystem; + private final boolean isRemoteSnapshot; private IndexMetadata( final Index index, @@ -743,6 +745,7 @@ private IndexMetadata( this.waitForActiveShards = waitForActiveShards; this.rolloverInfos = Collections.unmodifiableMap(rolloverInfos); this.isSystem = isSystem; + this.isRemoteSnapshot = IndexModule.Type.REMOTE_SNAPSHOT.match(this.settings); assert numberOfShards * routingFactor == routingNumShards : routingNumShards + " must be a multiple of " + numberOfShards; } @@ -1204,6 +1207,10 @@ public boolean isSystem() { return isSystem; } + public boolean isRemoteSnapshot() { + return isRemoteSnapshot; + } + public static Builder builder(String index) { return new Builder(index); } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java index d016501dd0910..e1aa5626f36c1 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java @@ -180,11 +180,6 @@ static Custom fromXContent(XContentParser parser, String name) throws IOExceptio // handling any Exception is caller's responsibility return parser.namedObject(Custom.class, name, null); } - - static Custom fromXContent(XContentParser parser) throws IOException { - String currentFieldName = parser.currentName(); - return fromXContent(parser, currentFieldName); - } } public static final Setting DEFAULT_REPLICA_COUNT_SETTING = Setting.intSetting( diff --git a/server/src/main/java/org/opensearch/cluster/metadata/RepositoriesMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/RepositoriesMetadata.java index 9b52bdd1b16c5..4b3dc7964a87b 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/RepositoriesMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/RepositoriesMetadata.java @@ -51,8 +51,10 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Collections; +import java.util.Comparator; import java.util.EnumSet; import java.util.List; +import java.util.stream.Collectors; import static org.opensearch.repositories.blobstore.BlobStoreRepository.SYSTEM_REPOSITORY_SETTING; @@ -164,6 +166,40 
@@ public boolean equalsIgnoreGenerations(@Nullable RepositoriesMetadata other) { return true; } + /** + * Checks if this instance and the given instance share the same repositories, with the option to skip checking a list of repos. + * This supports cases like the remote routing table repository, which may be present on only one of the nodes being compared. + * @param other other repositories metadata + * @param reposToSkip list of repos to skip check for equality + * @return {@code true} iff both instances contain the same repositories apart from differences in generations, not including repos provided in reposToSkip. + */ + public boolean equalsIgnoreGenerationsWithRepoSkip(@Nullable RepositoriesMetadata other, List<String> reposToSkip) { + if (other == null) { + return false; + } + List<RepositoryMetadata> currentRepositories = repositories.stream() + .filter(repo -> !reposToSkip.contains(repo.name())) + .collect(Collectors.toList()); + List<RepositoryMetadata> otherRepositories = other.repositories.stream() + .filter(repo -> !reposToSkip.contains(repo.name())) + .collect(Collectors.toList()); + + if (otherRepositories.size() != currentRepositories.size()) { + return false; + } + // Sort repos by name for ordered comparison + Comparator<RepositoryMetadata> compareByName = (o1, o2) -> o1.name().compareTo(o2.name()); + currentRepositories.sort(compareByName); + otherRepositories.sort(compareByName); + + for (int i = 0; i < currentRepositories.size(); i++) { + if (currentRepositories.get(i).equalsIgnoreGenerations(otherRepositories.get(i)) == false) { + return false; + } + } + return true; + } + @Override public int hashCode() { return repositories.hashCode(); diff --git a/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java b/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java index 6a95c98815698..6158461c7d4e9 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java +++ b/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java @@ -44,7 +44,6 @@ import org.opensearch.common.settings.Settings; import org.opensearch.core.common.Strings; import org.opensearch.core.index.shard.ShardId; -import org.opensearch.index.IndexModule; import org.opensearch.index.IndexNotFoundException; import org.opensearch.node.ResponseCollectorService; @@ -242,9 +241,7 @@ public GroupShardsIterator<ShardIterator> searchShards( final Set<ShardIterator> set = new HashSet<>(shards.size()); for (IndexShardRoutingTable shard : shards) { IndexMetadata indexMetadataForShard = indexMetadata(clusterState, shard.shardId.getIndex().getName()); - if (IndexModule.Type.REMOTE_SNAPSHOT.match( - indexMetadataForShard.getSettings().get(IndexModule.INDEX_STORE_TYPE_SETTING.getKey()) - ) && (preference == null || preference.isEmpty())) { + if (indexMetadataForShard.isRemoteSnapshot() && (preference == null || preference.isEmpty())) { preference = Preference.PRIMARY.type(); } diff --git a/server/src/main/java/org/opensearch/cluster/routing/RoutingPool.java b/server/src/main/java/org/opensearch/cluster/routing/RoutingPool.java index a4ff237460e28..db10ad61c7d6d 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/RoutingPool.java +++ b/server/src/main/java/org/opensearch/cluster/routing/RoutingPool.java @@ -11,8 +11,6 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.allocation.RoutingAllocation; -import org.opensearch.common.settings.Settings; -import org.opensearch.index.IndexModule; /** * {@link RoutingPool} defines the different node types based on the assigned capabilities. The methods
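This hunk and the surrounding ones all make the same substitution: the per-call settings lookup is replaced by the flag `IndexMetadata` now computes once in its constructor. Side by side, the pattern being removed and its replacement:

```java
// Before: every call site re-parsed the index settings.
boolean isRemoteSnapshot = IndexModule.Type.REMOTE_SNAPSHOT.match(
    indexMetadata.getSettings().get(IndexModule.INDEX_STORE_TYPE_SETTING.getKey())
);

// After: read the flag computed once in the IndexMetadata constructor.
boolean alsoRemoteSnapshot = indexMetadata.isRemoteSnapshot();
```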
The methods @@ -60,10 +58,6 @@ public static RoutingPool getShardPool(ShardRouting shard, RoutingAllocation all * @return {@link RoutingPool} for the given index. */ public static RoutingPool getIndexPool(IndexMetadata indexMetadata) { - Settings indexSettings = indexMetadata.getSettings(); - if (IndexModule.Type.REMOTE_SNAPSHOT.match(indexSettings.get(IndexModule.INDEX_STORE_TYPE_SETTING.getKey()))) { - return REMOTE_CAPABLE; - } - return LOCAL_ONLY; + return indexMetadata.isRemoteSnapshot() ? REMOTE_CAPABLE : LOCAL_ONLY; } } diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDecider.java index 2c7df6b81e676..efa5115939d3c 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDecider.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDecider.java @@ -54,6 +54,7 @@ import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.store.remote.filecache.FileCacheSettings; import org.opensearch.index.store.remote.filecache.FileCacheStats; import org.opensearch.snapshots.SnapshotShardSizeInfo; @@ -68,7 +69,6 @@ import static org.opensearch.cluster.routing.RoutingPool.getShardPool; import static org.opensearch.cluster.routing.allocation.DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING; import static org.opensearch.cluster.routing.allocation.DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING; -import static org.opensearch.index.store.remote.filecache.FileCache.DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING; /** * The {@link DiskThresholdDecider} checks that the node a shard is potentially @@ -109,11 +109,13 @@ public class DiskThresholdDecider extends AllocationDecider { private final DiskThresholdSettings diskThresholdSettings; private final boolean enableForSingleDataNode; + private final FileCacheSettings fileCacheSettings; public DiskThresholdDecider(Settings settings, ClusterSettings clusterSettings) { this.diskThresholdSettings = new DiskThresholdSettings(settings, clusterSettings); assert Version.CURRENT.major < 9 : "remove enable_for_single_data_node in 9"; this.enableForSingleDataNode = ENABLE_FOR_SINGLE_DATA_NODE.get(settings); + this.fileCacheSettings = new FileCacheSettings(settings, clusterSettings); } /** @@ -179,6 +181,12 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing The following block enables allocation for remote shards within safeguard limits of the filecache. */ if (REMOTE_CAPABLE.equals(getNodePool(node)) && REMOTE_CAPABLE.equals(getShardPool(shardRouting, allocation))) { + final double dataToFileCacheSizeRatio = fileCacheSettings.getRemoteDataRatio(); + // If the ratio is not set to a meaningful value, the file cache safeguard is disabled and allocation is allowed + if (dataToFileCacheSizeRatio <= 0.1f) { + return Decision.YES; + } + final List<ShardRouting> remoteShardsOnNode = StreamSupport.stream(node.spliterator(), false) .filter(shard -> shard.primary() && REMOTE_CAPABLE.equals(getShardPool(shard, allocation))) .collect(Collectors.toList()); @@ -199,7 +207,6 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing final FileCacheStats fileCacheStats = clusterInfo.getNodeFileCacheStats().getOrDefault(node.nodeId(), null); final long nodeCacheSize = fileCacheStats != null ?
fileCacheStats.getTotal().getBytes() : 0; final long totalNodeRemoteShardSize = currentNodeRemoteShardSize + shardSize; - final double dataToFileCacheSizeRatio = DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING.get(allocation.metadata().settings()); if (dataToFileCacheSizeRatio > 0.0f && totalNodeRemoteShardSize > dataToFileCacheSizeRatio * nodeCacheSize) { return allocation.decision( Decision.NO, @@ -208,6 +215,8 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing ); } return Decision.YES; + } else if (REMOTE_CAPABLE.equals(getShardPool(shardRouting, allocation))) { + return Decision.NO; } Map usages = clusterInfo.getNodeMostAvailableDiskUsages(); diff --git a/server/src/main/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableService.java b/server/src/main/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableService.java new file mode 100644 index 0000000000000..ba2208e17df1f --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableService.java @@ -0,0 +1,67 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.routing.remote; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.io.IOUtils; +import org.opensearch.node.Node; +import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.Repository; +import org.opensearch.repositories.blobstore.BlobStoreRepository; + +import java.io.IOException; +import java.util.function.Supplier; + +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteRoutingTableEnabled; + +/** + * A Service which provides APIs to upload and download routing table from remote store. 
+ * + * @opensearch.internal + */ +public class RemoteRoutingTableService extends AbstractLifecycleComponent { + + private static final Logger logger = LogManager.getLogger(RemoteRoutingTableService.class); + private final Settings settings; + private final Supplier repositoriesService; + private BlobStoreRepository blobStoreRepository; + + public RemoteRoutingTableService(Supplier repositoriesService, Settings settings) { + assert isRemoteRoutingTableEnabled(settings) : "Remote routing table is not enabled"; + this.repositoriesService = repositoriesService; + this.settings = settings; + } + + @Override + protected void doClose() throws IOException { + if (blobStoreRepository != null) { + IOUtils.close(blobStoreRepository); + } + } + + @Override + protected void doStart() { + assert isRemoteRoutingTableEnabled(settings) == true : "Remote routing table is not enabled"; + final String remoteStoreRepo = settings.get( + Node.NODE_ATTRIBUTES.getKey() + RemoteStoreNodeAttribute.REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY + ); + assert remoteStoreRepo != null : "Remote routing table repository is not configured"; + final Repository repository = repositoriesService.get().repository(remoteStoreRepo); + assert repository instanceof BlobStoreRepository : "Repository should be instance of BlobStoreRepository"; + blobStoreRepository = (BlobStoreRepository) repository; + } + + @Override + protected void doStop() {} + +} diff --git a/server/src/main/java/org/opensearch/cluster/routing/remote/package-info.java b/server/src/main/java/org/opensearch/cluster/routing/remote/package-info.java new file mode 100644 index 0000000000000..9fe016e783f20 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/routing/remote/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/** Package containing class to perform operations on remote routing table */ +package org.opensearch.cluster.routing.remote; diff --git a/server/src/main/java/org/opensearch/common/cache/service/NodeCacheStats.java b/server/src/main/java/org/opensearch/common/cache/service/NodeCacheStats.java index 07c75eab34194..dd94dbf61debb 100644 --- a/server/src/main/java/org/opensearch/common/cache/service/NodeCacheStats.java +++ b/server/src/main/java/org/opensearch/common/cache/service/NodeCacheStats.java @@ -8,6 +8,7 @@ package org.opensearch.common.cache.service; +import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest; import org.opensearch.action.admin.indices.stats.CommonStatsFlags; import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.common.cache.CacheType; @@ -51,6 +52,7 @@ public void writeTo(StreamOutput out) throws IOException { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(NodesStatsRequest.Metric.CACHE_STATS.metricName()); for (CacheType type : statsByCache.keySet()) { if (flags.getIncludeCaches().contains(type)) { builder.startObject(type.getValue()); @@ -58,6 +60,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.endObject(); } } + builder.endObject(); return builder; } @@ -77,4 +80,10 @@ public boolean equals(Object o) { public int hashCode() { return Objects.hash(statsByCache, flags); } + + // Get the immutable cache stats for a given cache, used to avoid having to process XContent in tests. + // Safe to expose publicly as the ImmutableCacheStatsHolder can't be modified after its creation. + public ImmutableCacheStatsHolder getStatsByCache(CacheType cacheType) { + return statsByCache.get(cacheType); + } } diff --git a/server/src/main/java/org/opensearch/common/remote/AbstractRemoteWritableBlobEntity.java b/server/src/main/java/org/opensearch/common/remote/AbstractRemoteWritableBlobEntity.java new file mode 100644 index 0000000000000..632b2b70d61df --- /dev/null +++ b/server/src/main/java/org/opensearch/common/remote/AbstractRemoteWritableBlobEntity.java @@ -0,0 +1,91 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.remote; + +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.core.compress.Compressor; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedMetadata; + +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.PATH_DELIMITER; + +/** + * An extension of {@link RemoteWriteableEntity} class which caters to the use case of writing to and reading from a blob storage + * + * @param The class type which can be uploaded to or downloaded from a blob storage. 
+ */ +public abstract class AbstractRemoteWritableBlobEntity implements RemoteWriteableEntity { + + protected String blobFileName; + + protected String blobName; + private final String clusterUUID; + private final Compressor compressor; + private final NamedXContentRegistry namedXContentRegistry; + private String[] pathTokens; + + public AbstractRemoteWritableBlobEntity( + final String clusterUUID, + final Compressor compressor, + final NamedXContentRegistry namedXContentRegistry + ) { + this.clusterUUID = clusterUUID; + this.compressor = compressor; + this.namedXContentRegistry = namedXContentRegistry; + } + + public abstract BlobPathParameters getBlobPathParameters(); + + public String getFullBlobName() { + return blobName; + } + + public String getBlobFileName() { + if (blobFileName == null) { + String[] pathTokens = getBlobPathTokens(); + if (pathTokens == null || pathTokens.length < 1) { + return null; + } + blobFileName = pathTokens[pathTokens.length - 1]; + } + return blobFileName; + } + + public String[] getBlobPathTokens() { + if (pathTokens != null) { + return pathTokens; + } + if (blobName == null) { + return null; + } + pathTokens = blobName.split(PATH_DELIMITER); + return pathTokens; + } + + public abstract String generateBlobFileName(); + + public String clusterUUID() { + return clusterUUID; + } + + public abstract UploadedMetadata getUploadedMetadata(); + + public void setFullBlobName(BlobPath blobPath) { + this.blobName = blobPath.buildAsString() + blobFileName; + } + + public NamedXContentRegistry getNamedXContentRegistry() { + return namedXContentRegistry; + } + + protected Compressor getCompressor() { + return compressor; + } + +} diff --git a/server/src/main/java/org/opensearch/common/remote/BlobPathParameters.java b/server/src/main/java/org/opensearch/common/remote/BlobPathParameters.java new file mode 100644 index 0000000000000..58c73a804b66a --- /dev/null +++ b/server/src/main/java/org/opensearch/common/remote/BlobPathParameters.java @@ -0,0 +1,34 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.remote; + +import java.util.List; + +/** + * Parameters which can be used to construct a blob path + * + */ +public class BlobPathParameters { + + private final List pathTokens; + private final String filePrefix; + + public BlobPathParameters(final List pathTokens, final String filePrefix) { + this.pathTokens = pathTokens; + this.filePrefix = filePrefix; + } + + public List getPathTokens() { + return pathTokens; + } + + public String getFilePrefix() { + return filePrefix; + } +} diff --git a/server/src/main/java/org/opensearch/common/remote/RemoteWritableEntityStore.java b/server/src/main/java/org/opensearch/common/remote/RemoteWritableEntityStore.java new file mode 100644 index 0000000000000..ccf7cafff1730 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/remote/RemoteWritableEntityStore.java @@ -0,0 +1,28 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.remote; + +import org.opensearch.core.action.ActionListener; + +import java.io.IOException; + +/** + * An interface to read/write an object from/to a remote storage. This interface is agnostic of the remote storage type. 
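For illustration: the blob-name bookkeeping in AbstractRemoteWritableBlobEntity above treats the full blob name as a PATH_DELIMITER-separated path whose last token is the file name. A minimal standalone sketch of that derivation (the blob name below is hypothetical, not one produced by this patch):

    public class BlobNameDemo {
        public static void main(String[] args) {
            // PATH_DELIMITER is "/" per RemoteClusterStateUtils in this patch.
            String blobName = "cluster-state/dsgYj10Nkso7/index/metadata__1__5"; // hypothetical
            String[] pathTokens = blobName.split("/");
            String blobFileName = pathTokens[pathTokens.length - 1];
            System.out.println(blobFileName); // prints: metadata__1__5
        }
    }
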
+ * + * @param <T> The object type which can be uploaded to or downloaded from remote storage. + * @param <U> The wrapper entity which provides methods for serializing/deserializing entity T. + */ +public interface RemoteWritableEntityStore<T, U extends RemoteWriteableEntity<T>> { + + public void writeAsync(U entity, ActionListener<Void> listener); + + public T read(U entity) throws IOException; + + public void readAsync(U entity, ActionListener<T> listener); +} diff --git a/server/src/main/java/org/opensearch/common/remote/RemoteWriteableEntity.java b/server/src/main/java/org/opensearch/common/remote/RemoteWriteableEntity.java new file mode 100644 index 0000000000000..778c24dce2e27 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/remote/RemoteWriteableEntity.java @@ -0,0 +1,34 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.remote; + +import java.io.IOException; +import java.io.InputStream; + +/** + * An interface which defines the serialization/deserialization methods for objects to be uploaded to or downloaded from remote store. + * This interface is agnostic of the remote storage type. + * + * @param <T> The object type which can be uploaded to or downloaded from remote storage. + */ +public interface RemoteWriteableEntity<T> { + /** + * @return An InputStream created by serializing the entity T + * @throws IOException Exception encountered during serialization + */ + public InputStream serialize() throws IOException; + + /** + * @param inputStream The InputStream which is used to read the serialized entity + * @return The entity T after deserialization + * @throws IOException Exception encountered during deserialization + */ + public T deserialize(InputStream inputStream) throws IOException; + +} diff --git a/server/src/main/java/org/opensearch/common/remote/package-info.java b/server/src/main/java/org/opensearch/common/remote/package-info.java new file mode 100644 index 0000000000000..08ff9e910dc98 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/remote/package-info.java @@ -0,0 +1,11 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ +/** + * Common remote store package + */ +package org.opensearch.common.remote; diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index d57ef7e780602..297fc98764d07 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -116,7 +116,7 @@ import org.opensearch.index.ShardIndexingPressureStore; import org.opensearch.index.remote.RemoteStorePressureSettings; import org.opensearch.index.remote.RemoteStoreStatsTrackerFactory; -import org.opensearch.index.store.remote.filecache.FileCache; +import org.opensearch.index.store.remote.filecache.FileCacheSettings; import org.opensearch.indices.IndexingMemoryController; import org.opensearch.indices.IndicesQueryCache; import org.opensearch.indices.IndicesRequestCache; @@ -689,7 +689,7 @@ public void apply(Settings value, Settings current, Settings previous) { // Settings related to Searchable Snapshots Node.NODE_SEARCH_CACHE_SIZE_SETTING, - FileCache.DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING, + FileCacheSettings.DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING, // Settings related to Remote Refresh Segment Pressure RemoteStorePressureSettings.REMOTE_REFRESH_SEGMENT_PRESSURE_ENABLED, diff --git a/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java b/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java index 7a364de1c5dc6..238df1bd90113 100644 --- a/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java @@ -36,6 +36,7 @@ protected FeatureFlagSettings( FeatureFlags.DATETIME_FORMATTER_CACHING_SETTING, FeatureFlags.TIERED_REMOTE_INDEX_SETTING, FeatureFlags.REMOTE_STORE_MIGRATION_EXPERIMENTAL_SETTING, - FeatureFlags.PLUGGABLE_CACHE_SETTING + FeatureFlags.PLUGGABLE_CACHE_SETTING, + FeatureFlags.REMOTE_PUBLICATION_EXPERIMENTAL_SETTING ); } diff --git a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java index 62cfbd861d4d9..82f43921d2d28 100644 --- a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java +++ b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java @@ -67,6 +67,11 @@ public class FeatureFlags { */ public static final String PLUGGABLE_CACHE = "opensearch.experimental.feature.pluggable.caching.enabled"; + /** + * Gates the functionality of remote routing table. 
+ */ + public static final String REMOTE_PUBLICATION_EXPERIMENTAL = "opensearch.experimental.feature.remote_store.publication.enabled"; + public static final Setting<Boolean> REMOTE_STORE_MIGRATION_EXPERIMENTAL_SETTING = Setting.boolSetting( REMOTE_STORE_MIGRATION_EXPERIMENTAL, false, @@ -89,6 +94,12 @@ public class FeatureFlags { public static final Setting<Boolean> PLUGGABLE_CACHE_SETTING = Setting.boolSetting(PLUGGABLE_CACHE, false, Property.NodeScope); + public static final Setting<Boolean> REMOTE_PUBLICATION_EXPERIMENTAL_SETTING = Setting.boolSetting( + REMOTE_PUBLICATION_EXPERIMENTAL, + false, + Property.NodeScope + ); + private static final List<Setting<Boolean>> ALL_FEATURE_FLAG_SETTINGS = List.of( REMOTE_STORE_MIGRATION_EXPERIMENTAL_SETTING, EXTENSIONS_SETTING, @@ -96,7 +107,8 @@ public class FeatureFlags { TELEMETRY_SETTING, DATETIME_FORMATTER_CACHING_SETTING, TIERED_REMOTE_INDEX_SETTING, - PLUGGABLE_CACHE_SETTING + PLUGGABLE_CACHE_SETTING, + REMOTE_PUBLICATION_EXPERIMENTAL_SETTING ); /** * Should store the settings from opensearch.yml. diff --git a/server/src/main/java/org/opensearch/discovery/DiscoveryModule.java b/server/src/main/java/org/opensearch/discovery/DiscoveryModule.java index 288371aa240a0..538dea5b2e60b 100644 --- a/server/src/main/java/org/opensearch/discovery/DiscoveryModule.java +++ b/server/src/main/java/org/opensearch/discovery/DiscoveryModule.java @@ -34,6 +34,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.opensearch.cluster.ClusterManagerMetrics; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.coordination.Coordinator; import org.opensearch.cluster.coordination.ElectionStrategy; @@ -133,7 +134,8 @@ public DiscoveryModule( RerouteService rerouteService, NodeHealthService nodeHealthService, PersistedStateRegistry persistedStateRegistry, - RemoteStoreNodeService remoteStoreNodeService + RemoteStoreNodeService remoteStoreNodeService, + ClusterManagerMetrics clusterManagerMetrics ) { final Collection<BiConsumer<DiscoveryNode, ClusterState>> joinValidators = new ArrayList<>(); final Map<String, Supplier<SeedHostsProvider>> hostProviders = new HashMap<>(); @@ -211,7 +213,8 @@ public DiscoveryModule( electionStrategy, nodeHealthService, persistedStateRegistry, - remoteStoreNodeService + remoteStoreNodeService, + clusterManagerMetrics ); } else { throw new IllegalArgumentException("Unknown discovery type [" + discoveryType + "]"); diff --git a/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java b/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java index 7622b07185f47..b3b1bf37f8696 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java +++ b/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java @@ -41,6 +41,7 @@ public class ClusterMetadataManifest implements Writeable, ToXContentFragment { public static final int CODEC_V0 = 0; // Older codec version, where we haven't introduced codec versions for manifest. public static final int CODEC_V1 = 1; // In Codec V1 we have introduced global-metadata and codec version in Manifest file. public static final int CODEC_V2 = 2; // In Codec V2, there are separate metadata files rather than a single global metadata file. + public static final int CODEC_V3 = 3; // In Codec V3, we introduce index routing-metadata in manifest file.
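The codec constants above gate serialization: fields introduced in codec V3 (the routing table version and the indices routing list) are written and parsed only when the codec version allows it. A generic, standalone sketch of that version-gating pattern over plain JDK streams (names and values are illustrative, not OpenSearch APIs):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    public class CodecGateDemo {
        static final int CODEC_V3 = 3;

        public static void main(String[] args) throws IOException {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            try (DataOutputStream out = new DataOutputStream(bytes)) {
                out.writeInt(CODEC_V3);  // the codec version is always written first
                out.writeLong(42L);      // a V3-only field, e.g. a routing table version
            }
            try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
                int codecVersion = in.readInt();
                // Readers tolerate older payloads by defaulting fields the codec predates.
                long routingTableVersion = codecVersion >= CODEC_V3 ? in.readLong() : -1L;
                System.out.println(routingTableVersion); // prints 42
            }
        }
    }
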
private static final ParseField CLUSTER_TERM_FIELD = new ParseField("cluster_term"); private static final ParseField STATE_VERSION_FIELD = new ParseField("state_version"); @@ -58,6 +59,8 @@ public class ClusterMetadataManifest implements Writeable, ToXContentFragment { private static final ParseField UPLOADED_SETTINGS_METADATA = new ParseField("uploaded_settings_metadata"); private static final ParseField UPLOADED_TEMPLATES_METADATA = new ParseField("uploaded_templates_metadata"); private static final ParseField UPLOADED_CUSTOM_METADATA = new ParseField("uploaded_custom_metadata"); + private static final ParseField ROUTING_TABLE_VERSION_FIELD = new ParseField("routing_table_version"); + private static final ParseField INDICES_ROUTING_FIELD = new ParseField("indices_routing"); private static ClusterMetadataManifest.Builder manifestV0Builder(Object[] fields) { return ClusterMetadataManifest.builder() @@ -86,6 +89,12 @@ private static ClusterMetadataManifest.Builder manifestV2Builder(Object[] fields .customMetadataMap(customMetadata(fields)); } + private static ClusterMetadataManifest.Builder manifestV3Builder(Object[] fields) { + return manifestV2Builder(fields).codecVersion(codecVersion(fields)) + .routingTableVersion(routingTableVersion(fields)) + .indicesRouting(indicesRouting(fields)); + } + private static long term(Object[] fields) { return (long) fields[0]; } @@ -151,6 +160,14 @@ private static Map customMetadata(Object[] fi return customs.stream().collect(Collectors.toMap(UploadedMetadataAttribute::getAttributeName, Function.identity())); } + private static long routingTableVersion(Object[] fields) { + return (long) fields[15]; + } + + private static List indicesRouting(Object[] fields) { + return (List) fields[16]; + } + private static final ConstructingObjectParser PARSER_V0 = new ConstructingObjectParser<>( "cluster_metadata_manifest", fields -> manifestV0Builder(fields).build() @@ -166,12 +183,18 @@ private static Map customMetadata(Object[] fi fields -> manifestV2Builder(fields).build() ); - private static final ConstructingObjectParser CURRENT_PARSER = PARSER_V2; + private static final ConstructingObjectParser PARSER_V3 = new ConstructingObjectParser<>( + "cluster_metadata_manifest", + fields -> manifestV3Builder(fields).build() + ); + + private static final ConstructingObjectParser CURRENT_PARSER = PARSER_V3; static { declareParser(PARSER_V0, CODEC_V0); declareParser(PARSER_V1, CODEC_V1); declareParser(PARSER_V2, CODEC_V2); + declareParser(PARSER_V3, CODEC_V3); } private static void declareParser(ConstructingObjectParser parser, long codec_version) { @@ -216,6 +239,14 @@ private static void declareParser(ConstructingObjectParser= CODEC_V3) { + parser.declareLong(ConstructingObjectParser.constructorArg(), ROUTING_TABLE_VERSION_FIELD); + parser.declareObjectArray( + ConstructingObjectParser.constructorArg(), + (p, c) -> UploadedIndexMetadata.fromXContent(p), + INDICES_ROUTING_FIELD + ); + } } private final int codecVersion; @@ -234,6 +265,8 @@ private static void declareParser(ConstructingObjectParser indicesRouting; public List getIndices() { return indices; @@ -306,6 +339,14 @@ public boolean hasMetadataAttributesFiles() { || !uploadedCustomMetadataMap.isEmpty(); } + public long getRoutingTableVersion() { + return routingTableVersion; + } + + public List getIndicesRouting() { + return indicesRouting; + } + public ClusterMetadataManifest( long clusterTerm, long version, @@ -322,7 +363,9 @@ public ClusterMetadataManifest( UploadedMetadataAttribute uploadedCoordinationMetadata, 
UploadedMetadataAttribute uploadedSettingsMetadata, UploadedMetadataAttribute uploadedTemplatesMetadata, - Map uploadedCustomMetadataMap + Map uploadedCustomMetadataMap, + long routingTableVersion, + List indicesRouting ) { this.clusterTerm = clusterTerm; this.stateVersion = version; @@ -336,6 +379,8 @@ public ClusterMetadataManifest( this.indices = Collections.unmodifiableList(indices); this.previousClusterUUID = previousClusterUUID; this.clusterUUIDCommitted = clusterUUIDCommitted; + this.routingTableVersion = routingTableVersion; + this.indicesRouting = Collections.unmodifiableList(indicesRouting); this.uploadedCoordinationMetadata = uploadedCoordinationMetadata; this.uploadedSettingsMetadata = uploadedSettingsMetadata; this.uploadedTemplatesMetadata = uploadedTemplatesMetadata; @@ -364,6 +409,8 @@ public ClusterMetadataManifest(StreamInput in) throws IOException { in.readMap(StreamInput::readString, UploadedMetadataAttribute::new) ); this.globalMetadataFileName = null; + this.routingTableVersion = in.readLong(); + this.indicesRouting = Collections.unmodifiableList(in.readList(UploadedIndexMetadata::new)); } else if (in.getVersion().onOrAfter(Version.V_2_12_0)) { this.codecVersion = in.readInt(); this.globalMetadataFileName = in.readString(); @@ -371,6 +418,8 @@ public ClusterMetadataManifest(StreamInput in) throws IOException { this.uploadedSettingsMetadata = null; this.uploadedTemplatesMetadata = null; this.uploadedCustomMetadataMap = null; + this.routingTableVersion = -1; + this.indicesRouting = null; } else { this.codecVersion = CODEC_V0; // Default codec this.globalMetadataFileName = null; @@ -378,6 +427,8 @@ public ClusterMetadataManifest(StreamInput in) throws IOException { this.uploadedSettingsMetadata = null; this.uploadedTemplatesMetadata = null; this.uploadedCustomMetadataMap = null; + this.routingTableVersion = -1; + this.indicesRouting = null; } } @@ -401,7 +452,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startArray(INDICES_FIELD.getPreferredName()); { for (UploadedIndexMetadata uploadedIndexMetadata : indices) { + builder.startObject(); uploadedIndexMetadata.toXContent(builder, params); + builder.endObject(); } } builder.endArray(); @@ -433,6 +486,18 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(CODEC_VERSION_FIELD.getPreferredName(), getCodecVersion()); builder.field(GLOBAL_METADATA_FIELD.getPreferredName(), getGlobalMetadataFileName()); } + if (onOrAfterCodecVersion(CODEC_V3)) { + builder.field(ROUTING_TABLE_VERSION_FIELD.getPreferredName(), getRoutingTableVersion()); + builder.startArray(INDICES_ROUTING_FIELD.getPreferredName()); + { + for (UploadedIndexMetadata uploadedIndexMetadata : indicesRouting) { + builder.startObject(); + uploadedIndexMetadata.toXContent(builder, params); + builder.endObject(); + } + } + builder.endArray(); + } return builder; } @@ -454,6 +519,8 @@ public void writeTo(StreamOutput out) throws IOException { uploadedSettingsMetadata.writeTo(out); uploadedTemplatesMetadata.writeTo(out); out.writeMap(uploadedCustomMetadataMap, StreamOutput::writeString, (o, v) -> v.writeTo(o)); + out.writeLong(routingTableVersion); + out.writeCollection(indicesRouting); } else if (out.getVersion().onOrAfter(Version.V_2_12_0)) { out.writeInt(codecVersion); out.writeString(globalMetadataFileName); @@ -480,7 +547,9 @@ public boolean equals(Object o) { && Objects.equals(previousClusterUUID, that.previousClusterUUID) && Objects.equals(clusterUUIDCommitted, 
that.clusterUUIDCommitted) && Objects.equals(globalMetadataFileName, that.globalMetadataFileName) - && Objects.equals(codecVersion, that.codecVersion); + && Objects.equals(codecVersion, that.codecVersion) + && Objects.equals(routingTableVersion, that.routingTableVersion) + && Objects.equals(indicesRouting, that.indicesRouting); } @Override @@ -497,7 +566,9 @@ public int hashCode() { nodeId, committed, previousClusterUUID, - clusterUUIDCommitted + clusterUUIDCommitted, + routingTableVersion, + indicesRouting ); } @@ -518,6 +589,10 @@ public static ClusterMetadataManifest fromXContentV1(XContentParser parser) thro return PARSER_V1.parse(parser, null); } + public static ClusterMetadataManifest fromXContentV2(XContentParser parser) throws IOException { + return PARSER_V2.parse(parser, null); + } + public static ClusterMetadataManifest fromXContent(XContentParser parser) throws IOException { return CURRENT_PARSER.parse(parser, null); } @@ -545,12 +620,24 @@ public static class Builder { private String previousClusterUUID; private boolean committed; private boolean clusterUUIDCommitted; + private long routingTableVersion; + private List indicesRouting; public Builder indices(List indices) { this.indices = indices; return this; } + public Builder routingTableVersion(long routingTableVersion) { + this.routingTableVersion = routingTableVersion; + return this; + } + + public Builder indicesRouting(List indicesRouting) { + this.indicesRouting = indicesRouting; + return this; + } + public Builder codecVersion(int codecVersion) { this.codecVersion = codecVersion; return this; @@ -625,6 +712,10 @@ public List getIndices() { return indices; } + public List getIndicesRouting() { + return indicesRouting; + } + public Builder previousClusterUUID(String previousClusterUUID) { this.previousClusterUUID = previousClusterUUID; return this; @@ -658,6 +749,8 @@ public Builder(ClusterMetadataManifest manifest) { this.indices = new ArrayList<>(manifest.indices); this.previousClusterUUID = manifest.previousClusterUUID; this.clusterUUIDCommitted = manifest.clusterUUIDCommitted; + this.routingTableVersion = manifest.routingTableVersion; + this.indicesRouting = new ArrayList<>(manifest.indicesRouting); } public ClusterMetadataManifest build() { @@ -677,7 +770,9 @@ public ClusterMetadataManifest build() { coordinationMetadata, settingsMetadata, templatesMetadata, - customMetadataMap + customMetadataMap, + routingTableVersion, + indicesRouting ); } @@ -777,11 +872,9 @@ public String getIndexUUID() { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - return builder.startObject() - .field(INDEX_NAME_FIELD.getPreferredName(), getIndexName()) + return builder.field(INDEX_NAME_FIELD.getPreferredName(), getIndexName()) .field(INDEX_UUID_FIELD.getPreferredName(), getIndexUUID()) - .field(UPLOADED_FILENAME_FIELD.getPreferredName(), getUploadedFilePath()) - .endObject(); + .field(UPLOADED_FILENAME_FIELD.getPreferredName(), getUploadedFilePath()); } @Override diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManager.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManager.java index 7edf62b997c4f..15db589a25e5a 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManager.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManager.java @@ -72,7 +72,7 @@ public class RemoteClusterStateCleanupManager implements Closeable { private 
BlobStoreTransferService blobStoreTransferService; private TimeValue staleFileCleanupInterval; private final AtomicBoolean deleteStaleMetadataRunning = new AtomicBoolean(false); - private AsyncStaleFileDeletion staleFileDeletionTask; + private volatile AsyncStaleFileDeletion staleFileDeletionTask; private long lastCleanupAttemptStateVersion; private final ThreadPool threadpool; private final ClusterApplierService clusterApplierService; @@ -428,5 +428,10 @@ protected boolean mustReschedule() { protected void runInternal() { remoteClusterStateCleanupManager.cleanUpStaleFiles(); } + + @Override + protected String getThreadPool() { + return ThreadPool.Names.REMOTE_PURGE; + } } } diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java index 9bf1dff2359ca..d0593dcd51475 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java @@ -18,6 +18,7 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.metadata.TemplatesMetadata; +import org.opensearch.cluster.routing.remote.RemoteRoutingTableService; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.CheckedRunnable; import org.opensearch.common.Nullable; @@ -69,6 +70,7 @@ import static java.util.Objects.requireNonNull; import static org.opensearch.gateway.PersistedClusterStateService.SLOW_WRITE_LOGGING_THRESHOLD; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteRoutingTableEnabled; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteStoreClusterStateEnabled; /** @@ -147,7 +149,7 @@ public class RemoteClusterStateService implements Closeable { public static final ChecksumBlobStoreFormat CUSTOM_METADATA_FORMAT = new ChecksumBlobStoreFormat<>( "custom", METADATA_NAME_FORMAT, - Metadata.Custom::fromXContent + null // no need of reader here, as this object is only used to write/serialize the object ); /** @@ -201,6 +203,7 @@ public class RemoteClusterStateService implements Closeable { private final List indexMetadataUploadListeners; private BlobStoreRepository blobStoreRepository; private BlobStoreTransferService blobStoreTransferService; + private Optional remoteRoutingTableService; private volatile TimeValue slowWriteLoggingThreshold; private volatile TimeValue indexMetadataUploadTimeout; @@ -213,7 +216,7 @@ public class RemoteClusterStateService implements Closeable { + "indices, coordination metadata updated : [{}], settings metadata updated : [{}], templates metadata " + "updated : [{}], custom metadata updated : [{}]"; public static final int INDEX_METADATA_CURRENT_CODEC_VERSION = 1; - public static final int MANIFEST_CURRENT_CODEC_VERSION = ClusterMetadataManifest.CODEC_V2; + public static final int MANIFEST_CURRENT_CODEC_VERSION = ClusterMetadataManifest.CODEC_V3; public static final int GLOBAL_METADATA_CURRENT_CODEC_VERSION = 2; // ToXContent Params with gateway mode. @@ -253,6 +256,9 @@ public RemoteClusterStateService( this.remoteStateStats = new RemotePersistenceStats(); this.remoteClusterStateCleanupManager = new RemoteClusterStateCleanupManager(this, clusterService); this.indexMetadataUploadListeners = indexMetadataUploadListeners; + this.remoteRoutingTableService = isRemoteRoutingTableEnabled(settings) + ? 
Optional.of(new RemoteRoutingTableService(repositoriesService, settings)) + : Optional.empty(); } /** @@ -749,6 +755,9 @@ public void close() throws IOException { if (blobStoreRepository != null) { IOUtils.close(blobStoreRepository); } + if (this.remoteRoutingTableService.isPresent()) { + this.remoteRoutingTableService.get().close(); + } } public void start() { @@ -761,6 +770,7 @@ public void start() { assert repository instanceof BlobStoreRepository : "Repository should be instance of BlobStoreRepository"; blobStoreRepository = (BlobStoreRepository) repository; remoteClusterStateCleanupManager.start(); + this.remoteRoutingTableService.ifPresent(RemoteRoutingTableService::start); } private ClusterMetadataManifest uploadManifest( @@ -796,7 +806,10 @@ private ClusterMetadataManifest uploadManifest( uploadedCoordinationMetadata, uploadedSettingsMetadata, uploadedTemplatesMetadata, - uploadedCustomMetadataMap + uploadedCustomMetadataMap, + clusterState.routingTable().version(), + // TODO: Add actual list of changed indices routing with index routing upload flow. + new ArrayList<>() ); writeMetadataManifest(clusterState.getClusterName().value(), clusterState.metadata().clusterUUID(), manifest, manifestFileName); return manifest; @@ -942,6 +955,11 @@ public TimeValue getMetadataManifestUploadTimeout() { return this.metadataManifestUploadTimeout; } + // Package private for unit test + Optional getRemoteRoutingTableService() { + return this.remoteRoutingTableService; + } + static String getManifestFileName(long term, long version, boolean committed, int codecVersion) { // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/manifest/manifest______C/P____ return String.join( diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateUtils.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateUtils.java new file mode 100644 index 0000000000000..500d1af0211e8 --- /dev/null +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateUtils.java @@ -0,0 +1,23 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.gateway.remote; + +import java.nio.charset.StandardCharsets; +import java.util.Base64; + +/** + * Utility class for Remote Cluster State + */ +public class RemoteClusterStateUtils { + public static final String PATH_DELIMITER = "/"; + + public static String encodeString(String content) { + return Base64.getUrlEncoder().withoutPadding().encodeToString(content.getBytes(StandardCharsets.UTF_8)); + } +} diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterStateBlobStore.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterStateBlobStore.java new file mode 100644 index 0000000000000..1aeecc4e70382 --- /dev/null +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterStateBlobStore.java @@ -0,0 +1,107 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.gateway.remote.model; + +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.RemoteWritableEntityStore; +import org.opensearch.common.remote.RemoteWriteableEntity; +import org.opensearch.core.action.ActionListener; +import org.opensearch.gateway.remote.RemoteClusterStateUtils; +import org.opensearch.index.translog.transfer.BlobStoreTransferService; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.threadpool.ThreadPool; + +import java.io.IOException; +import java.io.InputStream; +import java.util.concurrent.ExecutorService; + +/** + * Abstract class for a blob type storage + * + * @param The entity which can be uploaded to / downloaded from blob store + * @param The concrete class implementing {@link RemoteWriteableEntity} which is used as a wrapper for T entity. + */ +public class RemoteClusterStateBlobStore> implements RemoteWritableEntityStore { + + private final BlobStoreTransferService transferService; + private final BlobStoreRepository blobStoreRepository; + private final String clusterName; + private final ExecutorService executorService; + + public RemoteClusterStateBlobStore( + final BlobStoreTransferService blobStoreTransferService, + final BlobStoreRepository blobStoreRepository, + final String clusterName, + final ThreadPool threadPool, + final String executor + ) { + this.transferService = blobStoreTransferService; + this.blobStoreRepository = blobStoreRepository; + this.clusterName = clusterName; + this.executorService = threadPool.executor(executor); + } + + @Override + public void writeAsync(final U entity, final ActionListener listener) { + try { + try (InputStream inputStream = entity.serialize()) { + BlobPath blobPath = getBlobPathForUpload(entity); + entity.setFullBlobName(blobPath); + // TODO uncomment below logic after merging PR https://github.com/opensearch-project/OpenSearch/pull/13836 + // transferService.uploadBlob(inputStream, getBlobPathForUpload(entity), entity.getBlobFileName(), WritePriority.URGENT, + // listener); + } + } catch (Exception e) { + listener.onFailure(e); + } + } + + public T read(final U entity) throws IOException { + // TODO Add timing logs and tracing + assert entity.getFullBlobName() != null; + return entity.deserialize(transferService.downloadBlob(getBlobPathForDownload(entity), entity.getBlobFileName())); + } + + @Override + public void readAsync(final U entity, final ActionListener listener) { + executorService.execute(() -> { + try { + listener.onResponse(read(entity)); + } catch (Exception e) { + listener.onFailure(e); + } + }); + } + + private BlobPath getBlobPathForUpload(final AbstractRemoteWritableBlobEntity obj) { + BlobPath blobPath = blobStoreRepository.basePath() + .add(RemoteClusterStateUtils.encodeString(clusterName)) + .add("cluster-state") + .add(obj.clusterUUID()); + for (String token : obj.getBlobPathParameters().getPathTokens()) { + blobPath = blobPath.add(token); + } + return blobPath; + } + + private BlobPath getBlobPathForDownload(final AbstractRemoteWritableBlobEntity obj) { + String[] pathTokens = obj.getBlobPathTokens(); + BlobPath blobPath = new BlobPath(); + if (pathTokens == null || pathTokens.length < 1) { + return blobPath; + } + // Iterate till second last path token to get the blob folder + for (int i = 0; i < pathTokens.length - 1; i++) { + blobPath = blobPath.add(pathTokens[i]); + } + return blobPath; + } + +} diff --git 
a/server/src/main/java/org/opensearch/gateway/remote/model/package-info.java b/server/src/main/java/org/opensearch/gateway/remote/model/package-info.java new file mode 100644 index 0000000000000..c0d13d15cc885 --- /dev/null +++ b/server/src/main/java/org/opensearch/gateway/remote/model/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Package containing models for remote cluster state + */ +package org.opensearch.gateway.remote.model; diff --git a/server/src/main/java/org/opensearch/gateway/remote/routingtable/IndexRoutingTableHeader.java b/server/src/main/java/org/opensearch/gateway/remote/routingtable/IndexRoutingTableHeader.java new file mode 100644 index 0000000000000..5baea6adba0c7 --- /dev/null +++ b/server/src/main/java/org/opensearch/gateway/remote/routingtable/IndexRoutingTableHeader.java @@ -0,0 +1,81 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.gateway.remote.routingtable; + +import org.apache.lucene.codecs.CodecUtil; +import org.apache.lucene.index.CorruptIndexException; +import org.apache.lucene.index.IndexFormatTooNewException; +import org.apache.lucene.index.IndexFormatTooOldException; +import org.apache.lucene.store.InputStreamDataInput; +import org.apache.lucene.store.OutputStreamDataOutput; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; + +import java.io.EOFException; +import java.io.IOException; + +/** + * The stored header information for the individual index routing table + */ +public class IndexRoutingTableHeader implements Writeable { + + public static final String INDEX_ROUTING_HEADER_CODEC = "index_routing_header_codec"; + public static final int INITIAL_VERSION = 1; + public static final int CURRENT_VERSION = INITIAL_VERSION; + private final String indexName; + + public IndexRoutingTableHeader(String indexName) { + this.indexName = indexName; + } + + /** + * Reads the contents on the stream into the corresponding {@link IndexRoutingTableHeader} + * + * @param in streamInput + * @throws IOException exception thrown on failing to read from stream. + */ + public IndexRoutingTableHeader(StreamInput in) throws IOException { + try { + readHeaderVersion(in); + indexName = in.readString(); + } catch (EOFException e) { + throw new IOException("index routing header truncated", e); + } + } + + private void readHeaderVersion(final StreamInput in) throws IOException { + try { + CodecUtil.checkHeader(new InputStreamDataInput(in), INDEX_ROUTING_HEADER_CODEC, INITIAL_VERSION, CURRENT_VERSION); + } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException e) { + throw new IOException("index routing table header corrupted", e); + } + } + + /** + * Write the IndexRoutingTable to given stream. + * + * @param out stream to write + * @throws IOException exception thrown on failing to write to stream. 
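The header read and written above is Lucene's standard codec header (magic number, codec name, version), and checkHeader() validates it against an accepted version range. A small sketch of the round trip, assuming lucene-core on the classpath:

    import org.apache.lucene.codecs.CodecUtil;
    import org.apache.lucene.store.ByteBuffersDataOutput;

    public class CodecHeaderDemo {
        public static void main(String[] args) throws Exception {
            ByteBuffersDataOutput out = new ByteBuffersDataOutput();
            // Writes the magic number, codec name, and version.
            CodecUtil.writeHeader(out, "index_routing_header_codec", 1);
            // Re-reads the header, enforcing the [minVersion, maxVersion] range.
            int version = CodecUtil.checkHeader(out.toDataInput(), "index_routing_header_codec", 1, 1);
            System.out.println("header version = " + version); // prints: header version = 1
        }
    }
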
+ */ + public void writeTo(StreamOutput out) throws IOException { + try { + CodecUtil.writeHeader(new OutputStreamDataOutput(out), INDEX_ROUTING_HEADER_CODEC, CURRENT_VERSION); + out.writeString(indexName); + out.flush(); + } catch (IOException e) { + throw new IOException("Failed to write IndexRoutingTable header", e); + } + } + + public String getIndexName() { + return indexName; + } + +} diff --git a/server/src/main/java/org/opensearch/gateway/remote/routingtable/RemoteIndexRoutingTable.java b/server/src/main/java/org/opensearch/gateway/remote/routingtable/RemoteIndexRoutingTable.java new file mode 100644 index 0000000000000..17c55190da07f --- /dev/null +++ b/server/src/main/java/org/opensearch/gateway/remote/routingtable/RemoteIndexRoutingTable.java @@ -0,0 +1,100 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.gateway.remote.routingtable; + +import org.opensearch.cluster.routing.IndexRoutingTable; +import org.opensearch.cluster.routing.IndexShardRoutingTable; +import org.opensearch.core.common.io.stream.BufferedChecksumStreamInput; +import org.opensearch.core.common.io.stream.BufferedChecksumStreamOutput; +import org.opensearch.core.common.io.stream.InputStreamStreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.index.Index; + +import java.io.EOFException; +import java.io.IOException; +import java.io.InputStream; + +/** + * Remote store object for IndexRoutingTable + */ +public class RemoteIndexRoutingTable implements Writeable { + + private final IndexRoutingTable indexRoutingTable; + + public RemoteIndexRoutingTable(IndexRoutingTable indexRoutingTable) { + this.indexRoutingTable = indexRoutingTable; + } + + /** + * Reads data from inputStream and creates RemoteIndexRoutingTable object with the {@link IndexRoutingTable} + * @param inputStream input stream with index routing data + * @param index index for the current routing data + * @throws IOException exception thrown on failing to read from stream. + */ + public RemoteIndexRoutingTable(InputStream inputStream, Index index) throws IOException { + try { + try (BufferedChecksumStreamInput in = new BufferedChecksumStreamInput(new InputStreamStreamInput(inputStream), "assertion")) { + // Read the Table Header first and confirm the index + IndexRoutingTableHeader indexRoutingTableHeader = new IndexRoutingTableHeader(in); + assert indexRoutingTableHeader.getIndexName().equals(index.getName()); + + int numberOfShardRouting = in.readVInt(); + IndexRoutingTable.Builder indicesRoutingTable = IndexRoutingTable.builder(index); + for (int idx = 0; idx < numberOfShardRouting; idx++) { + IndexShardRoutingTable indexShardRoutingTable = IndexShardRoutingTable.Builder.readFrom(in); + indicesRoutingTable.addIndexShard(indexShardRoutingTable); + } + verifyCheckSum(in); + indexRoutingTable = indicesRoutingTable.build(); + } + } catch (EOFException e) { + throw new IOException("Indices Routing table is corrupted", e); + } + } + + public IndexRoutingTable getIndexRoutingTable() { + return indexRoutingTable; + } + + /** + * Writes {@link IndexRoutingTable} to the given stream + * @param streamOutput output stream to write + * @throws IOException exception thrown on failing to write to stream. 
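The writeTo implementation that follows appends the running checksum of everything written, and verifyCheckSum() recomputes it on read and compares it against the trailing value. The same trailing-checksum pattern can be sketched with plain JDK streams (OpenSearch's BufferedChecksumStream classes are CRC32-based; the payload here is made up):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.util.zip.CRC32;
    import java.util.zip.CheckedInputStream;
    import java.util.zip.CheckedOutputStream;

    public class ChecksumRoundTrip {
        public static void main(String[] args) throws IOException {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            CheckedOutputStream checkedOut = new CheckedOutputStream(bytes, new CRC32());
            DataOutputStream out = new DataOutputStream(checkedOut);
            out.writeUTF("index-routing-payload");                // payload
            out.writeLong(checkedOut.getChecksum().getValue());   // trailing checksum
            out.flush();

            CheckedInputStream checkedIn = new CheckedInputStream(new ByteArrayInputStream(bytes.toByteArray()), new CRC32());
            DataInputStream in = new DataInputStream(checkedIn);
            in.readUTF();                                         // payload
            long expected = checkedIn.getChecksum().getValue();   // computed over the payload
            long read = in.readLong();                            // trailing checksum
            if (read != expected) {
                throw new IOException("checksum verification failed");
            }
            System.out.println("checksum OK: 0x" + Long.toHexString(expected));
        }
    }
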
+ */ + @Override + public void writeTo(StreamOutput streamOutput) throws IOException { + try { + BufferedChecksumStreamOutput out = new BufferedChecksumStreamOutput(streamOutput); + IndexRoutingTableHeader indexRoutingTableHeader = new IndexRoutingTableHeader(indexRoutingTable.getIndex().getName()); + indexRoutingTableHeader.writeTo(out); + out.writeVInt(indexRoutingTable.shards().size()); + for (IndexShardRoutingTable next : indexRoutingTable) { + IndexShardRoutingTable.Builder.writeTo(next, out); + } + out.writeLong(out.getChecksum()); + out.flush(); + } catch (IOException e) { + throw new IOException("Failed to write IndexRoutingTable to stream", e); + } + } + + private void verifyCheckSum(BufferedChecksumStreamInput in) throws IOException { + long expectedChecksum = in.getChecksum(); + long readChecksum = in.readLong(); + if (readChecksum != expectedChecksum) { + throw new IOException( + "checksum verification failed - expected: 0x" + + Long.toHexString(expectedChecksum) + + ", got: 0x" + + Long.toHexString(readChecksum) + ); + } + } +} diff --git a/server/src/main/java/org/opensearch/gateway/remote/routingtable/package-info.java b/server/src/main/java/org/opensearch/gateway/remote/routingtable/package-info.java new file mode 100644 index 0000000000000..a6cb2251a5dd7 --- /dev/null +++ b/server/src/main/java/org/opensearch/gateway/remote/routingtable/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Package containing class to perform operations on remote routing table. + */ +package org.opensearch.gateway.remote.routingtable; diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index 2b0a62c18d5a7..6c0ab2f6b0153 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -728,7 +728,6 @@ public static IndexMergePolicy fromString(String text) { private volatile TimeValue remoteTranslogUploadBufferInterval; private final String remoteStoreTranslogRepository; private final String remoteStoreRepository; - private final boolean isRemoteSnapshot; private int remoteTranslogKeepExtraGen; private Version extendedCompatibilitySnapshotVersion; @@ -919,9 +918,8 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti remoteTranslogUploadBufferInterval = INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.get(settings); remoteStoreRepository = settings.get(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY); this.remoteTranslogKeepExtraGen = INDEX_REMOTE_TRANSLOG_KEEP_EXTRA_GEN_SETTING.get(settings); - isRemoteSnapshot = IndexModule.Type.REMOTE_SNAPSHOT.match(this.settings); - if (isRemoteSnapshot && FeatureFlags.isEnabled(SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY)) { + if (isRemoteSnapshot() && FeatureFlags.isEnabled(SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY)) { extendedCompatibilitySnapshotVersion = SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY_MINIMUM_VERSION; } else { extendedCompatibilitySnapshotVersion = Version.CURRENT.minimumIndexCompatibilityVersion(); @@ -1278,7 +1276,7 @@ public String getRemoteStoreTranslogRepository() { * Returns true if this is remote/searchable snapshot */ public boolean isRemoteSnapshot() { - return isRemoteSnapshot; + return indexMetadata.isRemoteSnapshot(); } /** diff --git 
a/server/src/main/java/org/opensearch/index/mapper/DerivedField.java b/server/src/main/java/org/opensearch/index/mapper/DerivedField.java index 7ebe4e5f0b0e8..b502e41cbb97b 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DerivedField.java +++ b/server/src/main/java/org/opensearch/index/mapper/DerivedField.java @@ -8,6 +8,7 @@ package org.opensearch.index.mapper; +import org.opensearch.Version; import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -18,6 +19,7 @@ import org.opensearch.script.Script; import java.io.IOException; +import java.util.Map; import java.util.Objects; /** @@ -25,10 +27,13 @@ */ @PublicApi(since = "2.14.0") public class DerivedField implements Writeable, ToXContentFragment { - private final String name; private final String type; private final Script script; + private String sourceIndexedField; + private Map properties; + private Boolean ignoreMalformed; + private String format; public DerivedField(String name, String type, Script script) { this.name = name; @@ -40,6 +45,14 @@ public DerivedField(StreamInput in) throws IOException { name = in.readString(); type = in.readString(); script = new Script(in); + if (in.getVersion().onOrAfter(Version.V_2_15_0)) { + if (in.readBoolean()) { + properties = in.readMap(); + } + sourceIndexedField = in.readOptionalString(); + format = in.readOptionalString(); + ignoreMalformed = in.readOptionalBoolean(); + } } @Override @@ -47,6 +60,17 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(name); out.writeString(type); script.writeTo(out); + if (out.getVersion().onOrAfter(Version.V_2_15_0)) { + if (properties == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + out.writeMap(properties); + } + out.writeOptionalString(sourceIndexedField); + out.writeOptionalString(format); + out.writeOptionalBoolean(ignoreMalformed); + } } @Override @@ -54,6 +78,18 @@ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params par builder.startObject(name); builder.field("type", type); builder.field("script", script); + if (properties != null) { + builder.field("properties", properties); + } + if (sourceIndexedField != null) { + builder.field("source_indexed_field", sourceIndexedField); + } + if (format != null) { + builder.field("format", format); + } + if (ignoreMalformed != null) { + builder.field("ignore_malformed", ignoreMalformed); + } builder.endObject(); return builder; } @@ -70,9 +106,41 @@ public Script getScript() { return script; } + public Map getProperties() { + return properties; + } + + public String getSourceIndexedField() { + return sourceIndexedField; + } + + public String getFormat() { + return format; + } + + public boolean getIgnoreMalformed() { + return Boolean.TRUE.equals(ignoreMalformed); + } + + public void setProperties(Map properties) { + this.properties = properties; + } + + public void setSourceIndexedField(String sourceIndexedField) { + this.sourceIndexedField = sourceIndexedField; + } + + public void setFormat(String format) { + this.format = format; + } + + public void setIgnoreMalformed(boolean ignoreMalformed) { + this.ignoreMalformed = ignoreMalformed; + } + @Override public int hashCode() { - return Objects.hash(name, type, script); + return Objects.hash(name, type, script, sourceIndexedField, properties, ignoreMalformed, format); } @Override @@ -84,7 +152,12 @@ public boolean equals(Object obj) { return false; } 
DerivedField other = (DerivedField) obj; - return Objects.equals(name, other.name) && Objects.equals(type, other.type) && Objects.equals(script, other.script); + return Objects.equals(name, other.name) + && Objects.equals(type, other.type) + && Objects.equals(script, other.script) + && Objects.equals(sourceIndexedField, other.sourceIndexedField) + && Objects.equals(properties, other.properties) + && Objects.equals(ignoreMalformed, other.ignoreMalformed) + && Objects.equals(format, other.format); } - } diff --git a/server/src/main/java/org/opensearch/index/mapper/FieldTypeInference.java b/server/src/main/java/org/opensearch/index/mapper/FieldTypeInference.java new file mode 100644 index 0000000000000..713bdc4e691cd --- /dev/null +++ b/server/src/main/java/org/opensearch/index/mapper/FieldTypeInference.java @@ -0,0 +1,181 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.mapper; + +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.ReaderUtil; +import org.opensearch.common.Randomness; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.search.lookup.SourceLookup; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.Random; +import java.util.Set; +import java.util.TreeSet; + +/** + * This class performs type inference by analyzing the _source documents. It uses a random sample of documents to infer the field type, similar to dynamic mapping type guessing logic. + * Unlike guessing based on the first document, where field could be missing, this method generates a random sample to make a more accurate inference. + * This approach is especially useful for handling missing fields, which is common in nested fields within derived fields of object types. + * + *
<p>The sample size should be chosen carefully to ensure a high probability of selecting at least one document where the field is present.
+ * However, it's essential to strike a balance because a large sample size can lead to performance issues since each sample document's _source field is loaded and examined until the field is found.
+ * <p>Determining the sample size ({@code S}) is akin to deciding how many balls to draw from a bin, ensuring a high probability ({@code >= P}) of drawing at least one green ball (a document with the field) from a mixture of {@code R} red balls (documents without the field) and {@code G} green balls:
+ * <p>{@code P >= 1 - C(R, S) / C(R + G, S)}
+ * Here, {@code C()} represents the binomial coefficient. + * For a high confidence level, we aim for {@code P >= 0.95 }. For example, with {@code 10^7 } documents where the field is present in {@code 2% } of them, the sample size {@code S } should be around 149 to achieve a probability of {@code 0.95}. + */ +public class FieldTypeInference { + private final IndexReader indexReader; + private final String indexName; + private final MapperService mapperService; + // TODO expose using a index setting + private int sampleSize; + private static final int DEFAULT_SAMPLE_SIZE = 150; + private static final int MAX_SAMPLE_SIZE_ALLOWED = 1000; + + public FieldTypeInference(String indexName, MapperService mapperService, IndexReader indexReader) { + this.indexName = indexName; + this.mapperService = mapperService; + this.indexReader = indexReader; + this.sampleSize = DEFAULT_SAMPLE_SIZE; + } + + public void setSampleSize(int sampleSize) { + if (sampleSize > MAX_SAMPLE_SIZE_ALLOWED) { + throw new IllegalArgumentException("sample_size should be less than " + MAX_SAMPLE_SIZE_ALLOWED); + } + this.sampleSize = sampleSize; + } + + public int getSampleSize() { + return sampleSize; + } + + public Mapper infer(ValueFetcher valueFetcher) throws IOException { + RandomSourceValuesGenerator valuesGenerator = new RandomSourceValuesGenerator(sampleSize, indexReader, valueFetcher); + Mapper inferredMapper = null; + while (inferredMapper == null && valuesGenerator.hasNext()) { + List values = valuesGenerator.next(); + if (values == null || values.isEmpty()) { + continue; + } + // always use first value in case of multi value field to infer type + inferredMapper = inferTypeFromObject(values.get(0)); + } + return inferredMapper; + } + + private Mapper inferTypeFromObject(Object o) throws IOException { + if (o == null) { + return null; + } + DocumentMapper mapper = mapperService.documentMapper(); + XContentBuilder builder = XContentFactory.jsonBuilder().startObject().field("field", o).endObject(); + BytesReference bytesReference = BytesReference.bytes(builder); + SourceToParse sourceToParse = new SourceToParse(indexName, "_id", bytesReference, JsonXContent.jsonXContent.mediaType()); + ParsedDocument parsedDocument = mapper.parse(sourceToParse); + Mapping mapping = parsedDocument.dynamicMappingsUpdate(); + return mapping.root.getMapper("field"); + } + + private static class RandomSourceValuesGenerator implements Iterator> { + private final ValueFetcher valueFetcher; + private final IndexReader indexReader; + private final SourceLookup sourceLookup; + private final int[] docs; + private int iter; + private int leaf; + private final int MAX_ATTEMPTS_TO_GENERATE_RANDOM_SAMPLES = 10000; + + public RandomSourceValuesGenerator(int sampleSize, IndexReader indexReader, ValueFetcher valueFetcher) { + this.valueFetcher = valueFetcher; + this.indexReader = indexReader; + sampleSize = Math.min(sampleSize, indexReader.numDocs()); + this.docs = getSortedRandomNum( + sampleSize, + indexReader.numDocs(), + Math.max(sampleSize, MAX_ATTEMPTS_TO_GENERATE_RANDOM_SAMPLES) + ); + this.iter = 0; + this.leaf = -1; + this.sourceLookup = new SourceLookup(); + if (hasNext()) { + setNextLeaf(); + } + } + + @Override + public boolean hasNext() { + return iter < docs.length && leaf < indexReader.leaves().size(); + } + + /** + * Ensure hasNext() is called before calling next() + */ + @Override + public List next() { + int docID = docs[iter] - indexReader.leaves().get(leaf).docBase; + if (docID >= indexReader.leaves().get(leaf).reader().numDocs()) { + 
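// docs[] is sorted, so a docID past the end of the current leaf means this sample lives in a later segment; advance the leaf cursor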
setNextLeaf(); + } + // deleted docs are getting used to infer type, which should be okay? + sourceLookup.setSegmentAndDocument(indexReader.leaves().get(leaf), docs[iter] - indexReader.leaves().get(leaf).docBase); + try { + iter++; + return valueFetcher.fetchValues(sourceLookup); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + private void setNextLeaf() { + int readerIndex = ReaderUtil.subIndex(docs[iter], indexReader.leaves()); + if (readerIndex != leaf) { + leaf = readerIndex; + } else { + // this will only happen when leaves are exhausted and readerIndex will be indexReader.leaves()-1. + leaf++; + } + if (leaf < indexReader.leaves().size()) { + valueFetcher.setNextReader(indexReader.leaves().get(leaf)); + } + } + + private static int[] getSortedRandomNum(int sampleSize, int upperBound, int attempts) { + Set generatedNumbers = new TreeSet<>(); + Random random = Randomness.get(); + int itr = 0; + if (upperBound <= 10 * sampleSize) { + List numberList = new ArrayList<>(); + for (int i = 0; i < upperBound; i++) { + numberList.add(i); + } + Collections.shuffle(numberList, random); + generatedNumbers.addAll(numberList.subList(0, sampleSize)); + } else { + while (generatedNumbers.size() < sampleSize && itr++ < attempts) { + int randomNumber = random.nextInt(upperBound); + generatedNumbers.add(randomNumber); + } + } + return generatedNumbers.stream().mapToInt(Integer::valueOf).toArray(); + } + } +} diff --git a/server/src/main/java/org/opensearch/index/mapper/MapperService.java b/server/src/main/java/org/opensearch/index/mapper/MapperService.java index 83fb743305702..a1f3894c9f14c 100644 --- a/server/src/main/java/org/opensearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/opensearch/index/mapper/MapperService.java @@ -73,6 +73,7 @@ import java.io.Closeable; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -200,6 +201,11 @@ public enum MergeReason { Property.IndexScope, Property.Deprecated ); + // Deprecated set of meta-fields, for checking if a field is meta, use an instance method isMetadataField instead + @Deprecated + public static final Set META_FIELDS_BEFORE_7DOT8 = Collections.unmodifiableSet( + new HashSet<>(Arrays.asList("_id", IgnoredFieldMapper.NAME, "_index", "_routing", "_size", "_timestamp", "_ttl", "_type")) + ); private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(MapperService.class); diff --git a/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCache.java b/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCache.java index 2029b461674c7..e61e5ecd4084a 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCache.java +++ b/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCache.java @@ -10,7 +10,6 @@ import org.apache.lucene.store.IndexInput; import org.opensearch.common.annotation.PublicApi; -import org.opensearch.common.settings.Setting; import org.opensearch.core.common.breaker.CircuitBreaker; import org.opensearch.core.common.breaker.CircuitBreakingException; import org.opensearch.index.store.remote.utils.cache.CacheUsage; @@ -52,21 +51,6 @@ public class FileCache implements RefCountedCache { private final CircuitBreaker circuitBreaker; - /** - * Defines a limit of how much total remote data can be referenced as a ratio of the size of the disk reserved for - * the file cache. 
For example, if 100GB disk space is configured for use as a file cache and the - * remote_data_ratio of 5 is defined, then a total of 500GB of remote data can be loaded as searchable snapshots. - * This is designed to be a safeguard to prevent oversubscribing a cluster. - * Specify a value of zero for no limit, which is the default for compatibility reasons. - */ - public static final Setting DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING = Setting.doubleSetting( - "cluster.filecache.remote_data_ratio", - 0.0, - 0.0, - Setting.Property.NodeScope, - Setting.Property.Dynamic - ); - public FileCache(SegmentedCache cache, CircuitBreaker circuitBreaker) { this.theCache = cache; this.circuitBreaker = circuitBreaker; diff --git a/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheSettings.java b/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheSettings.java new file mode 100644 index 0000000000000..76086be932ecb --- /dev/null +++ b/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheSettings.java @@ -0,0 +1,50 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.store.remote.filecache; + +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; + +/** + * Settings relate to file cache + * + * @opensearch.internal + */ +public class FileCacheSettings { + /** + * Defines a limit of how much total remote data can be referenced as a ratio of the size of the disk reserved for + * the file cache. For example, if 100GB disk space is configured for use as a file cache and the + * remote_data_ratio of 5 is defined, then a total of 500GB of remote data can be loaded as searchable snapshots. + * This is designed to be a safeguard to prevent oversubscribing a cluster. + * Specify a value of zero for no limit, which is the default for compatibility reasons. 
+ */ + public static final Setting DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING = Setting.doubleSetting( + "cluster.filecache.remote_data_ratio", + 0.0, + 0.0, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + + private volatile double remoteDataRatio; + + public FileCacheSettings(Settings settings, ClusterSettings clusterSettings) { + setRemoteDataRatio(DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING.get(settings)); + clusterSettings.addSettingsUpdateConsumer(DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING, this::setRemoteDataRatio); + } + + public void setRemoteDataRatio(double remoteDataRatio) { + this.remoteDataRatio = remoteDataRatio; + } + + public double getRemoteDataRatio() { + return remoteDataRatio; + } +} diff --git a/server/src/main/java/org/opensearch/index/translog/BaseTranslogReader.java b/server/src/main/java/org/opensearch/index/translog/BaseTranslogReader.java index d6fa2a2e53de3..37af1dcbeab8b 100644 --- a/server/src/main/java/org/opensearch/index/translog/BaseTranslogReader.java +++ b/server/src/main/java/org/opensearch/index/translog/BaseTranslogReader.java @@ -32,6 +32,7 @@ package org.opensearch.index.translog; +import org.opensearch.core.common.io.stream.BufferedChecksumStreamInput; import org.opensearch.core.common.io.stream.ByteBufferStreamInput; import org.opensearch.index.seqno.SequenceNumbers; diff --git a/server/src/main/java/org/opensearch/index/translog/Translog.java b/server/src/main/java/org/opensearch/index/translog/Translog.java index 842e9c77d2350..87e0c21b8203c 100644 --- a/server/src/main/java/org/opensearch/index/translog/Translog.java +++ b/server/src/main/java/org/opensearch/index/translog/Translog.java @@ -48,6 +48,8 @@ import org.opensearch.core.common.Strings; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.BufferedChecksumStreamInput; +import org.opensearch.core.common.io.stream.BufferedChecksumStreamOutput; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.index.shard.ShardId; diff --git a/server/src/main/java/org/opensearch/index/translog/TranslogHeader.java b/server/src/main/java/org/opensearch/index/translog/TranslogHeader.java index 7b5be9505f27a..66a9fe08d06b5 100644 --- a/server/src/main/java/org/opensearch/index/translog/TranslogHeader.java +++ b/server/src/main/java/org/opensearch/index/translog/TranslogHeader.java @@ -40,6 +40,8 @@ import org.apache.lucene.store.OutputStreamDataOutput; import org.apache.lucene.util.BytesRef; import org.opensearch.common.io.Channels; +import org.opensearch.core.common.io.stream.BufferedChecksumStreamInput; +import org.opensearch.core.common.io.stream.BufferedChecksumStreamOutput; import org.opensearch.core.common.io.stream.InputStreamStreamInput; import org.opensearch.core.common.io.stream.OutputStreamStreamOutput; import org.opensearch.core.common.io.stream.StreamInput; diff --git a/server/src/main/java/org/opensearch/index/translog/TranslogSnapshot.java b/server/src/main/java/org/opensearch/index/translog/TranslogSnapshot.java index 89718156cbbe8..521472f4d64a0 100644 --- a/server/src/main/java/org/opensearch/index/translog/TranslogSnapshot.java +++ b/server/src/main/java/org/opensearch/index/translog/TranslogSnapshot.java @@ -32,6 +32,7 @@ package org.opensearch.index.translog; import org.opensearch.common.io.Channels; +import org.opensearch.core.common.io.stream.BufferedChecksumStreamInput; import 
org.opensearch.index.seqno.SequenceNumbers; import java.io.EOFException; diff --git a/server/src/main/java/org/opensearch/index/translog/TranslogWriter.java b/server/src/main/java/org/opensearch/index/translog/TranslogWriter.java index 86f7567f3333d..b0c7d51c3e43b 100644 --- a/server/src/main/java/org/opensearch/index/translog/TranslogWriter.java +++ b/server/src/main/java/org/opensearch/index/translog/TranslogWriter.java @@ -50,6 +50,7 @@ import org.opensearch.core.Assertions; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.BufferedChecksumStreamInput; import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.seqno.SequenceNumbers; diff --git a/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java b/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java index 44af83bb839c1..57f7e402536f2 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java +++ b/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java @@ -715,23 +715,28 @@ private synchronized void cleanCache(double stalenessThreshold) { } // Contains CleanupKey objects with open shard but invalidated readerCacheKeyId. final Set cleanupKeysFromOutdatedReaders = new HashSet<>(); - // Contains CleanupKey objects of a closed shard. + // Contains CleanupKey objects for a full cache cleanup. + final Set> cleanupKeysFromFullClean = new HashSet<>(); + // Contains CleanupKey objects for a closed shard. final Set> cleanupKeysFromClosedShards = new HashSet<>(); for (Iterator iterator = keysToClean.iterator(); iterator.hasNext();) { CleanupKey cleanupKey = iterator.next(); iterator.remove(); - if (cleanupKey.readerCacheKeyId == null || !cleanupKey.entity.isOpen()) { - // null indicates full cleanup, as does a closed shard - IndexShard indexShard = (IndexShard) cleanupKey.entity.getCacheIdentity(); + final IndexShard indexShard = (IndexShard) cleanupKey.entity.getCacheIdentity(); + if (cleanupKey.readerCacheKeyId == null) { + // null indicates full cleanup // Add both shardId and indexShardHashCode to uniquely identify an indexShard. + cleanupKeysFromFullClean.add(new Tuple<>(indexShard.shardId(), indexShard.hashCode())); + } else if (!cleanupKey.entity.isOpen()) { + // The shard is closed cleanupKeysFromClosedShards.add(new Tuple<>(indexShard.shardId(), indexShard.hashCode())); } else { cleanupKeysFromOutdatedReaders.add(cleanupKey); } } - if (cleanupKeysFromOutdatedReaders.isEmpty() && cleanupKeysFromClosedShards.isEmpty()) { + if (cleanupKeysFromOutdatedReaders.isEmpty() && cleanupKeysFromFullClean.isEmpty() && cleanupKeysFromClosedShards.isEmpty()) { return; } @@ -740,15 +745,15 @@ private synchronized void cleanCache(double stalenessThreshold) { for (Iterator> iterator = cache.keys().iterator(); iterator.hasNext();) { ICacheKey key = iterator.next(); Key delegatingKey = key.key; - if (cleanupKeysFromClosedShards.contains(new Tuple<>(delegatingKey.shardId, delegatingKey.indexShardHashCode))) { - // Since the shard is closed, the cache should drop stats for this shard. 
- dimensionListsToDrop.add(key.dimensions); + Tuple shardIdInfo = new Tuple<>(delegatingKey.shardId, delegatingKey.indexShardHashCode); + if (cleanupKeysFromFullClean.contains(shardIdInfo) || cleanupKeysFromClosedShards.contains(shardIdInfo)) { iterator.remove(); } else { CacheEntity cacheEntity = cacheEntityLookup.apply(delegatingKey.shardId).orElse(null); if (cacheEntity == null) { // If cache entity is null, it means that index or shard got deleted/closed meanwhile. // So we will delete this key. + dimensionListsToDrop.add(key.dimensions); iterator.remove(); } else { CleanupKey cleanupKey = new CleanupKey(cacheEntity, delegatingKey.readerCacheKeyId); @@ -757,6 +762,12 @@ private synchronized void cleanCache(double stalenessThreshold) { } } } + + if (cleanupKeysFromClosedShards.contains(shardIdInfo)) { + // Since the shard is closed, the cache should drop stats for this shard. + // This should not happen on a full cache cleanup. + dimensionListsToDrop.add(key.dimensions); + } } for (List closedDimensions : dimensionListsToDrop) { // Invalidate a dummy key containing the dimensions we need to drop stats for diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 04bd31e6a5809..cb1f2caa082fc 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -155,6 +155,7 @@ import org.opensearch.index.store.remote.filecache.FileCache; import org.opensearch.index.store.remote.filecache.FileCacheCleaner; import org.opensearch.index.store.remote.filecache.FileCacheFactory; +import org.opensearch.index.store.remote.filecache.FileCacheSettings; import org.opensearch.indices.IndicesModule; import org.opensearch.indices.IndicesService; import org.opensearch.indices.RemoteStoreSettings; @@ -1159,7 +1160,8 @@ protected Node( metadataIndexUpgradeService, shardLimitValidator, indicesService, - clusterInfoService::getClusterInfo + clusterInfoService::getClusterInfo, + new FileCacheSettings(settings, clusterService.getClusterSettings())::getRemoteDataRatio ); RemoteStoreRestoreService remoteStoreRestoreService = new RemoteStoreRestoreService( @@ -1197,7 +1199,8 @@ protected Node( rerouteService, fsHealthService, persistedStateRegistry, - remoteStoreNodeService + remoteStoreNodeService, + clusterManagerMetrics ); final SearchPipelineService searchPipelineService = new SearchPipelineService( clusterService, diff --git a/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java index b10ec0d99c3d5..a0f745a4270c4 100644 --- a/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java +++ b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java @@ -13,6 +13,7 @@ import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.gateway.remote.RemoteClusterStateService; import org.opensearch.node.Node; import org.opensearch.repositories.blobstore.BlobStoreRepository; @@ -28,6 +29,8 @@ import java.util.Set; import java.util.stream.Collectors; +import static org.opensearch.common.util.FeatureFlags.REMOTE_PUBLICATION_EXPERIMENTAL; + /** * This is an abstraction for validating and storing information specific to remote backed storage nodes. 
* @@ -46,6 +49,8 @@ public class RemoteStoreNodeAttribute { + "." + CryptoMetadata.SETTINGS_KEY; public static final String REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX = "remote_store.repository.%s.settings."; + public static final String REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY = "remote_store.routing_table.repository"; + private final RepositoriesMetadata repositoriesMetadata; public static List SUPPORTED_DATA_REPO_NAME_ATTRIBUTES = List.of( @@ -157,6 +162,10 @@ private Set getValidatedRepositoryNames(DiscoveryNode node) { } else if (node.getAttributes().containsKey(REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY)) { repositoryNames.add(validateAttributeNonNull(node, REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY)); } + if (node.getAttributes().containsKey(REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY)) { + repositoryNames.add(validateAttributeNonNull(node, REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY)); + } + return repositoryNames; } @@ -187,6 +196,15 @@ public static boolean isRemoteStoreClusterStateEnabled(Settings settings) { && isRemoteClusterStateAttributePresent(settings); } + private static boolean isRemoteRoutingTableAttributePresent(Settings settings) { + return settings.getByPrefix(Node.NODE_ATTRIBUTES.getKey() + REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY) + .isEmpty() == false; + } + + public static boolean isRemoteRoutingTableEnabled(Settings settings) { + return FeatureFlags.isEnabled(REMOTE_PUBLICATION_EXPERIMENTAL) && isRemoteRoutingTableAttributePresent(settings); + } + public RepositoriesMetadata getRepositoriesMetadata() { return this.repositoriesMetadata; } @@ -231,6 +249,21 @@ public int hashCode() { return hashCode; } + /** + * Checks if 2 instances are equal, with option to skip check for a list of repos. + * * + * @param o other instance + * @param reposToSkip list of repos to skip check for equality + * @return {@code true} iff both instances are equal, not including the repositories in both instances if they are part of reposToSkip. 
+ */ + public boolean equalsWithRepoSkip(Object o, List reposToSkip) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + RemoteStoreNodeAttribute that = (RemoteStoreNodeAttribute) o; + return this.getRepositoriesMetadata().equalsIgnoreGenerationsWithRepoSkip(that.getRepositoriesMetadata(), reposToSkip); + } + @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/server/src/main/java/org/opensearch/script/Script.java b/server/src/main/java/org/opensearch/script/Script.java index 9e74314c281cd..f18bd992cb00d 100644 --- a/server/src/main/java/org/opensearch/script/Script.java +++ b/server/src/main/java/org/opensearch/script/Script.java @@ -589,7 +589,7 @@ public Script(StreamInput in) throws IOException { @SuppressWarnings("unchecked") Map options = (Map) (Map) in.readMap(); this.options = options; - this.params = in.readMap(); + this.params = Collections.unmodifiableMap(in.readMap()); } @Override diff --git a/server/src/main/java/org/opensearch/script/UpdateScript.java b/server/src/main/java/org/opensearch/script/UpdateScript.java index 86697e9ae550e..f6355fe24817b 100644 --- a/server/src/main/java/org/opensearch/script/UpdateScript.java +++ b/server/src/main/java/org/opensearch/script/UpdateScript.java @@ -32,6 +32,7 @@ package org.opensearch.script; +import java.util.Collections; import java.util.Map; /** @@ -53,7 +54,7 @@ public abstract class UpdateScript { private final Map ctx; public UpdateScript(Map params, Map ctx) { - this.params = params; + this.params = Collections.unmodifiableMap(params); this.ctx = ctx; } diff --git a/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java b/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java index 07248a0719c3a..6c22567d8cf0d 100644 --- a/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java +++ b/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java @@ -1004,6 +1004,37 @@ public SearchSourceBuilder derivedField(String name, String type, Script script) return this; } + /** + * Adds a derived field with the given name with provided type, script and other parameters + * @param name name of the derived field + * @param type type of the derived field + * @param script script associated with derived field + * @param properties map of field name and type of field for nested fields within object derived field + * @param sourceIndexedField source text field which is indexed to filter documents for better performance + * @param format date format + * @param ignoreMalformed ignores malformed fields instead of failing search request + */ + public SearchSourceBuilder derivedField( + String name, + String type, + Script script, + Map properties, + String sourceIndexedField, + String format, + Boolean ignoreMalformed + ) { + if (derivedFields == null) { + derivedFields = new ArrayList<>(); + } + DerivedField derivedField = new DerivedField(name, type, script); + derivedField.setProperties(properties); + derivedField.setSourceIndexedField(sourceIndexedField); + derivedField.setFormat(format); + derivedField.setIgnoreMalformed(ignoreMalformed); + derivedFields.add(derivedField); + return this; + } + /** * Sets the boost a specific index or alias will receive when the query is executed * against it. 
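A usage sketch for the new derivedField overload above (not part of this diff; the field name, script, and argument values are illustrative, assuming the usual org.opensearch.index.query.QueryBuilders and org.opensearch.script.Script imports):

SearchSourceBuilder source = new SearchSourceBuilder().query(QueryBuilders.matchAllQuery())
    .derivedField(
        "client_ip",                               // derived field name
        "keyword",                                 // derived field type
        new Script("emit(params._source[\"message\"].split(\" \")[0])"),
        null,                                      // properties: only needed for object-typed derived fields
        "message",                                 // source_indexed_field: indexed field used to prune documents
        null,                                      // format: only needed for date-typed derived fields
        true                                       // ignore_malformed: skip documents the script cannot handle
    );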
diff --git a/server/src/main/java/org/opensearch/snapshots/RestoreService.java b/server/src/main/java/org/opensearch/snapshots/RestoreService.java index 93aac68eb898c..4cc8049b2b06b 100644 --- a/server/src/main/java/org/opensearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/opensearch/snapshots/RestoreService.java @@ -129,7 +129,6 @@ import static org.opensearch.common.util.set.Sets.newHashSet; import static org.opensearch.index.IndexModule.INDEX_STORE_TYPE_SETTING; import static org.opensearch.index.store.remote.directory.RemoteSnapshotDirectory.SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY_MINIMUM_VERSION; -import static org.opensearch.index.store.remote.filecache.FileCache.DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING; import static org.opensearch.node.Node.NODE_SEARCH_CACHE_SIZE_SETTING; /** @@ -204,6 +203,8 @@ public class RestoreService implements ClusterStateApplier { private final ClusterManagerTaskThrottler.ThrottlingKey restoreSnapshotTaskKey; + private final Supplier dataToFileCacheSizeRatioSupplier; + private static final CleanRestoreStateTaskExecutor cleanRestoreStateTaskExecutor = new CleanRestoreStateTaskExecutor(); public RestoreService( @@ -214,7 +215,8 @@ public RestoreService( MetadataIndexUpgradeService metadataIndexUpgradeService, ShardLimitValidator shardLimitValidator, IndicesService indicesService, - Supplier clusterInfoSupplier + Supplier clusterInfoSupplier, + Supplier dataToFileCacheSizeRatioSupplier ) { this.clusterService = clusterService; this.repositoriesService = repositoriesService; @@ -228,6 +230,7 @@ public RestoreService( this.shardLimitValidator = shardLimitValidator; this.indicesService = indicesService; this.clusterInfoSupplier = clusterInfoSupplier; + this.dataToFileCacheSizeRatioSupplier = dataToFileCacheSizeRatioSupplier; // Task is onboarded for throttling, it will get retried from associated TransportClusterManagerNodeAction. 
restoreSnapshotTaskKey = clusterService.registerClusterManagerTask(ClusterManagerTaskKeys.RESTORE_SNAPSHOT_KEY, true); @@ -399,9 +402,7 @@ public ClusterState execute(ClusterState currentState) { if (isRemoteSnapshot) { snapshotIndexMetadata = addSnapshotToIndexSettings(snapshotIndexMetadata, snapshot, snapshotIndexId); } - final boolean isSearchableSnapshot = IndexModule.Type.REMOTE_SNAPSHOT.match( - snapshotIndexMetadata.getSettings().get(IndexModule.INDEX_STORE_TYPE_SETTING.getKey()) - ); + final boolean isSearchableSnapshot = snapshotIndexMetadata.isRemoteSnapshot(); final boolean isRemoteStoreShallowCopy = Boolean.TRUE.equals( snapshotInfo.isRemoteStoreIndexShallowCopyEnabled() ) && metadata.index(index).getSettings().getAsBoolean(SETTING_REMOTE_STORE_ENABLED, false); @@ -855,7 +856,7 @@ private IndexMetadata updateIndexSettings( private void validateSearchableSnapshotRestorable(long totalRestorableRemoteIndexesSize) { ClusterInfo clusterInfo = clusterInfoSupplier.get(); - double remoteDataToFileCacheRatio = DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING.get(clusterService.getSettings()); + final double remoteDataToFileCacheRatio = dataToFileCacheSizeRatioSupplier.get(); Map nodeFileCacheStats = clusterInfo.getNodeFileCacheStats(); if (nodeFileCacheStats.isEmpty() || remoteDataToFileCacheRatio <= 0.01f) { return; @@ -869,7 +870,7 @@ private void validateSearchableSnapshotRestorable(long totalRestorableRemoteInde .sum(); Predicate isRemoteSnapshotShard = shardRouting -> shardRouting.primary() - && indicesService.indexService(shardRouting.index()).getIndexSettings().isRemoteSnapshot(); + && clusterService.state().getMetadata().getIndexSafe(shardRouting.index()).isRemoteSnapshot(); ShardsIterator shardsIterator = clusterService.state() .routingTable() diff --git a/server/src/test/java/org/opensearch/cluster/coordination/FollowersCheckerTests.java b/server/src/test/java/org/opensearch/cluster/coordination/FollowersCheckerTests.java index a106706c00732..d0bc41b459cc3 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/FollowersCheckerTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/FollowersCheckerTests.java @@ -33,6 +33,7 @@ import org.opensearch.OpenSearchException; import org.opensearch.Version; +import org.opensearch.cluster.ClusterManagerMetrics; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.coordination.Coordinator.Mode; import org.opensearch.cluster.coordination.FollowersChecker.FollowerCheckRequest; @@ -47,6 +48,9 @@ import org.opensearch.core.transport.TransportResponse.Empty; import org.opensearch.monitor.NodeHealthService; import org.opensearch.monitor.StatusInfo; +import org.opensearch.telemetry.TestInMemoryMetricsRegistry; +import org.opensearch.telemetry.metrics.MetricsRegistry; +import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.EqualsHashCodeTestUtils; import org.opensearch.test.EqualsHashCodeTestUtils.CopyFunction; @@ -131,6 +135,8 @@ protected void onSendRequest(long requestId, String action, TransportRequest req transportService.start(); transportService.acceptIncomingRequests(); + TestInMemoryMetricsRegistry metricsRegistry = new TestInMemoryMetricsRegistry(); + final FollowersChecker followersChecker = new FollowersChecker( settings, clusterSettings, @@ -139,7 +145,8 @@ protected void onSendRequest(long requestId, String action, TransportRequest req (node, reason) -> { assert false : node; }, - () -> new 
StatusInfo(StatusInfo.Status.HEALTHY, "healthy-info") + () -> new StatusInfo(StatusInfo.Status.HEALTHY, "healthy-info"), + new ClusterManagerMetrics(metricsRegistry) ); followersChecker.setCurrentNodes(discoveryNodesHolder[0]); @@ -193,35 +200,43 @@ protected void onSendRequest(long requestId, String action, TransportRequest req followersChecker.clearCurrentNodes(); deterministicTaskQueue.runAllTasks(); assertThat(checkedNodes, empty()); + assertEquals(Integer.valueOf(0), metricsRegistry.getCounterStore().get("followers.checker.failure.count").getCounterValue()); } public void testFailsNodeThatDoesNotRespond() { final Settings settings = randomSettings(); + TestInMemoryMetricsRegistry metricsRegistry = new TestInMemoryMetricsRegistry(); testBehaviourOfFailingNode( settings, () -> null, "followers check retry count exceeded", (FOLLOWER_CHECK_RETRY_COUNT_SETTING.get(settings) - 1) * FOLLOWER_CHECK_INTERVAL_SETTING.get(settings).millis() + FOLLOWER_CHECK_RETRY_COUNT_SETTING.get(settings) * FOLLOWER_CHECK_TIMEOUT_SETTING.get(settings).millis(), - () -> new StatusInfo(HEALTHY, "healthy-info") + () -> new StatusInfo(HEALTHY, "healthy-info"), + metricsRegistry ); + assertEquals(Integer.valueOf(2), metricsRegistry.getCounterStore().get("followers.checker.failure.count").getCounterValue()); } public void testFailsNodeThatRejectsCheck() { final Settings settings = randomSettings(); + TestInMemoryMetricsRegistry metricsRegistry = new TestInMemoryMetricsRegistry(); testBehaviourOfFailingNode( settings, () -> { throw new OpenSearchException("simulated exception"); }, "followers check retry count exceeded", (FOLLOWER_CHECK_RETRY_COUNT_SETTING.get(settings) - 1) * FOLLOWER_CHECK_INTERVAL_SETTING.get(settings).millis(), - () -> new StatusInfo(HEALTHY, "healthy-info") + () -> new StatusInfo(HEALTHY, "healthy-info"), + metricsRegistry ); + assertEquals(Integer.valueOf(2), metricsRegistry.getCounterStore().get("followers.checker.failure.count").getCounterValue()); } public void testFailureCounterResetsOnSuccess() { final Settings settings = randomSettings(); final int retryCount = FOLLOWER_CHECK_RETRY_COUNT_SETTING.get(settings); final int maxRecoveries = randomIntBetween(3, 10); + TestInMemoryMetricsRegistry metricsRegistry = new TestInMemoryMetricsRegistry(); // passes just enough checks to keep it alive, up to maxRecoveries, and then fails completely testBehaviourOfFailingNode(settings, new Supplier() { @@ -241,18 +256,23 @@ public Empty get() { "followers check retry count exceeded", (FOLLOWER_CHECK_RETRY_COUNT_SETTING.get(settings) * (maxRecoveries + 1) - 1) * FOLLOWER_CHECK_INTERVAL_SETTING.get(settings) .millis(), - () -> new StatusInfo(HEALTHY, "healthy-info") + () -> new StatusInfo(HEALTHY, "healthy-info"), + metricsRegistry ); + assertEquals(Integer.valueOf(2), metricsRegistry.getCounterStore().get("followers.checker.failure.count").getCounterValue()); } public void testFailsNodeThatIsDisconnected() { + TestInMemoryMetricsRegistry metricsRegistry = new TestInMemoryMetricsRegistry(); testBehaviourOfFailingNode( Settings.EMPTY, () -> { throw new ConnectTransportException(null, "simulated exception"); }, "disconnected", 0, - () -> new StatusInfo(HEALTHY, "healthy-info") + () -> new StatusInfo(HEALTHY, "healthy-info"), + metricsRegistry ); + assertEquals(Integer.valueOf(2), metricsRegistry.getCounterStore().get("followers.checker.failure.count").getCounterValue()); } public void testFailsNodeThatDisconnects() { @@ -297,6 +317,7 @@ public String toString() { transportService.acceptIncomingRequests(); 
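// The TestInMemoryMetricsRegistry created below is what lets this test assert on the followers.checker.failure.count counter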
final AtomicBoolean nodeFailed = new AtomicBoolean(); + TestInMemoryMetricsRegistry metricsRegistry = new TestInMemoryMetricsRegistry(); final FollowersChecker followersChecker = new FollowersChecker( settings, @@ -307,7 +328,8 @@ public String toString() { assertTrue(nodeFailed.compareAndSet(false, true)); assertThat(reason, equalTo("disconnected")); }, - () -> new StatusInfo(HEALTHY, "healthy-info") + () -> new StatusInfo(HEALTHY, "healthy-info"), + new ClusterManagerMetrics(metricsRegistry) ); DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(localNode).add(otherNode).localNodeId(localNode.getId()).build(); @@ -318,16 +340,20 @@ public String toString() { deterministicTaskQueue.runAllRunnableTasks(); assertTrue(nodeFailed.get()); assertThat(followersChecker.getFaultyNodes(), contains(otherNode)); + assertEquals(Integer.valueOf(1), metricsRegistry.getCounterStore().get("followers.checker.failure.count").getCounterValue()); } public void testFailsNodeThatIsUnhealthy() { + TestInMemoryMetricsRegistry metricsRegistry = new TestInMemoryMetricsRegistry(); testBehaviourOfFailingNode( randomSettings(), () -> { throw new NodeHealthCheckFailureException("non writable exception"); }, "health check failed", 0, - () -> new StatusInfo(HEALTHY, "healthy-info") + () -> new StatusInfo(HEALTHY, "healthy-info"), + metricsRegistry ); + assertEquals(Integer.valueOf(2), metricsRegistry.getCounterStore().get("followers.checker.failure.count").getCounterValue()); } private void testBehaviourOfFailingNode( @@ -335,7 +361,8 @@ private void testBehaviourOfFailingNode( Supplier responder, String failureReason, long expectedFailureTime, - NodeHealthService nodeHealthService + NodeHealthService nodeHealthService, + MetricsRegistry metricsRegistry ) { final DiscoveryNode localNode = new DiscoveryNode("local-node", buildNewFakeTransportAddress(), Version.CURRENT); final DiscoveryNode otherNode = new DiscoveryNode("other-node", buildNewFakeTransportAddress(), Version.CURRENT); @@ -386,7 +413,6 @@ public String toString() { transportService.acceptIncomingRequests(); final AtomicBoolean nodeFailed = new AtomicBoolean(); - final FollowersChecker followersChecker = new FollowersChecker( settings, clusterSettings, @@ -396,7 +422,8 @@ public String toString() { assertTrue(nodeFailed.compareAndSet(false, true)); assertThat(reason, equalTo(failureReason)); }, - nodeHealthService + nodeHealthService, + new ClusterManagerMetrics(metricsRegistry) ); DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(localNode).add(otherNode).localNodeId(localNode.getId()).build(); @@ -501,7 +528,11 @@ protected void onSendRequest(long requestId, String action, TransportRequest req if (exception != null) { throw exception; } - }, (node, reason) -> { assert false : node; }, () -> new StatusInfo(UNHEALTHY, "unhealthy-info")); + }, + (node, reason) -> { assert false : node; }, + () -> new StatusInfo(UNHEALTHY, "unhealthy-info"), + new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE) + ); final long leaderTerm = randomLongBetween(2, Long.MAX_VALUE); final long followerTerm = randomLongBetween(1, leaderTerm - 1); @@ -574,7 +605,11 @@ protected void onSendRequest(long requestId, String action, TransportRequest req if (exception != null) { throw exception; } - }, (node, reason) -> { assert false : node; }, () -> new StatusInfo(HEALTHY, "healthy-info")); + }, + (node, reason) -> { assert false : node; }, + () -> new StatusInfo(HEALTHY, "healthy-info"), + new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE) + ); { // Does not 
call into the coordinator in the normal case @@ -721,7 +756,11 @@ public void testPreferClusterManagerNodes() { ); final FollowersChecker followersChecker = new FollowersChecker(Settings.EMPTY, clusterSettings, transportService, fcr -> { assert false : fcr; - }, (node, reason) -> { assert false : node; }, () -> new StatusInfo(HEALTHY, "healthy-info")); + }, + (node, reason) -> { assert false : node; }, + () -> new StatusInfo(HEALTHY, "healthy-info"), + new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE) + ); followersChecker.setCurrentNodes(discoveryNodes); List followerTargets = Stream.of(capturingTransport.getCapturedRequestsAndClear()) .map(cr -> cr.node) diff --git a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java index 3e343e95f6c4b..9cb1bd0b57132 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java @@ -72,6 +72,7 @@ import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING; @@ -944,6 +945,145 @@ public void testNodeJoinInMixedMode() { JoinTaskExecutor.ensureNodesCompatibility(joiningNode2, currentNodes, metadata); } + public void testRemoteRoutingTableRepoAbsentNodeJoin() { + + final DiscoveryNode existingNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO), + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(existingNode).localNodeId(existingNode.getId()).build()) + .build(); + + DiscoveryNode joiningNode = newDiscoveryNode(remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO)); + JoinTaskExecutor.ensureNodesCompatibility(joiningNode, currentState.getNodes(), currentState.metadata()); + } + + public void testRemoteRoutingTableNodeJoinRepoPresentInJoiningNode() { + final DiscoveryNode existingNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO), + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(existingNode).localNodeId(existingNode.getId()).build()) + .build(); + + Map attr = remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO); + attr.putAll(remoteRoutingTableAttributes(ROUTING_TABLE_REPO)); + DiscoveryNode joiningNode = newDiscoveryNode(attr); + JoinTaskExecutor.ensureNodesCompatibility(joiningNode, currentState.getNodes(), 
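// expected not to throw: a joining node is allowed to introduce the routing table repository attribute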
currentState.metadata()); + } + + public void testRemoteRoutingTableNodeJoinRepoPresentInExistingNode() { + Map attr = remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO); + attr.putAll(remoteRoutingTableAttributes(ROUTING_TABLE_REPO)); + final DiscoveryNode existingNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + attr, + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(existingNode).localNodeId(existingNode.getId()).build()) + .build(); + + DiscoveryNode joiningNode = newDiscoveryNode(remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO)); + assertThrows( + IllegalStateException.class, + () -> JoinTaskExecutor.ensureNodesCompatibility(joiningNode, currentState.getNodes(), currentState.metadata()) + ); + } + + public void testRemoteRoutingTableNodeJoinRepoPresentInBothNode() { + Map attr = remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO); + attr.putAll(remoteRoutingTableAttributes(ROUTING_TABLE_REPO)); + final DiscoveryNode existingNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + attr, + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(existingNode).localNodeId(existingNode.getId()).build()) + .build(); + + DiscoveryNode joiningNode = newDiscoveryNode(attr); + JoinTaskExecutor.ensureNodesCompatibility(joiningNode, currentState.getNodes(), currentState.metadata()); + } + + public void testRemoteRoutingTableNodeJoinNodeWithRemoteAndRoutingRepoDifference() { + Map attr = remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO); + attr.putAll(remoteRoutingTableAttributes(ROUTING_TABLE_REPO)); + final DiscoveryNode existingNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + attr, + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + + final DiscoveryNode existingNode2 = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO), + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(existingNode2).add(existingNode).localNodeId(existingNode.getId()).build()) + .build(); + + DiscoveryNode joiningNode = newDiscoveryNode(attr); + JoinTaskExecutor.ensureNodesCompatibility(joiningNode, currentState.getNodes(), currentState.metadata()); + } + + public void testRemoteRoutingTableNodeJoinNodeWithRemoteAndRoutingRepoDifferenceMixedMode() { + Map attr = remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO); + attr.putAll(remoteRoutingTableAttributes(ROUTING_TABLE_REPO)); + final DiscoveryNode existingNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + attr, + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + + final DiscoveryNode existingNode2 = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO), + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + + final Settings settings = Settings.builder() + .put(MIGRATION_DIRECTION_SETTING.getKey(), RemoteStoreNodeService.Direction.REMOTE_STORE) + .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed") + .build(); + final Settings nodeSettings = 
Settings.builder().put(REMOTE_STORE_MIGRATION_EXPERIMENTAL, "true").build(); + FeatureFlags.initializeFeatureFlags(nodeSettings); + Metadata metadata = Metadata.builder().persistentSettings(settings).build(); + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(existingNode2).add(existingNode).localNodeId(existingNode.getId()).build()) + .metadata(metadata) + .build(); + + DiscoveryNode joiningNode = newDiscoveryNode(attr); + JoinTaskExecutor.ensureNodesCompatibility(joiningNode, currentState.getNodes(), currentState.metadata()); + } + private void validateRepositoryMetadata(ClusterState updatedState, DiscoveryNode existingNode, int expectedRepositories) throws Exception { @@ -985,6 +1125,7 @@ private DiscoveryNode newDiscoveryNode(Map attributes) { private static final String TRANSLOG_REPO = "translog-repo"; private static final String CLUSTER_STATE_REPO = "cluster-state-repo"; private static final String COMMON_REPO = "remote-repo"; + private static final String ROUTING_TABLE_REPO = "routing-table-repo"; private Map remoteStoreNodeAttributes(String segmentRepoName, String translogRepoName) { return remoteStoreNodeAttributes(segmentRepoName, translogRepoName, CLUSTER_STATE_REPO); @@ -1049,6 +1190,28 @@ private Map remoteStateNodeAttributes(String clusterStateRepo) { }; } + private Map remoteRoutingTableAttributes(String repoName) { + String routingTableRepositoryTypeAttributeKey = String.format( + Locale.getDefault(), + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + repoName + ); + String routingTableRepositorySettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + repoName + ); + + return new HashMap<>() { + { + put(REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY, repoName); + putIfAbsent(routingTableRepositoryTypeAttributeKey, "s3"); + putIfAbsent(routingTableRepositorySettingsAttributeKeyPrefix + "bucket", "state_bucket"); + putIfAbsent(routingTableRepositorySettingsAttributeKeyPrefix + "base_path", "/state/path"); + } + }; + } + private void validateAttributes(Map remoteStoreNodeAttributes, ClusterState currentState, DiscoveryNode existingNode) { DiscoveryNode joiningNode = newDiscoveryNode(remoteStoreNodeAttributes); Exception e = assertThrows( diff --git a/server/src/test/java/org/opensearch/cluster/coordination/LeaderCheckerTests.java b/server/src/test/java/org/opensearch/cluster/coordination/LeaderCheckerTests.java index fe65058333116..decdeddfa37a1 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/LeaderCheckerTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/LeaderCheckerTests.java @@ -34,6 +34,7 @@ import org.opensearch.OpenSearchException; import org.opensearch.Version; +import org.opensearch.cluster.ClusterManagerMetrics; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.coordination.LeaderChecker.LeaderCheckRequest; import org.opensearch.cluster.node.DiscoveryNode; @@ -44,6 +45,7 @@ import org.opensearch.core.transport.TransportResponse; import org.opensearch.core.transport.TransportResponse.Empty; import org.opensearch.monitor.StatusInfo; +import org.opensearch.telemetry.TestInMemoryMetricsRegistry; import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.EqualsHashCodeTestUtils; import org.opensearch.test.EqualsHashCodeTestUtils.CopyFunction; @@ -175,11 +177,13 @@ public String toString() { transportService.acceptIncomingRequests(); final 
AtomicBoolean leaderFailed = new AtomicBoolean(); + TestInMemoryMetricsRegistry metricsRegistry = new TestInMemoryMetricsRegistry(); + final ClusterManagerMetrics clusterManagerMetrics = new ClusterManagerMetrics(metricsRegistry); final LeaderChecker leaderChecker = new LeaderChecker(settings, clusterSettings, transportService, e -> { assertThat(e.getMessage(), matchesRegex("node \\[.*\\] failed \\[[1-9][0-9]*\\] consecutive checks")); assertTrue(leaderFailed.compareAndSet(false, true)); - }, () -> new StatusInfo(StatusInfo.Status.HEALTHY, "healthy-info")); + }, () -> new StatusInfo(StatusInfo.Status.HEALTHY, "healthy-info"), clusterManagerMetrics); logger.info("--> creating first checker"); leaderChecker.updateLeader(leader1); @@ -229,6 +233,7 @@ public String toString() { ); } leaderChecker.updateLeader(null); + assertEquals(Integer.valueOf(1), metricsRegistry.getCounterStore().get("leader.checker.failure.count").getCounterValue()); } enum Response { @@ -293,10 +298,13 @@ public String toString() { transportService.acceptIncomingRequests(); final AtomicBoolean leaderFailed = new AtomicBoolean(); + TestInMemoryMetricsRegistry metricsRegistry = new TestInMemoryMetricsRegistry(); + final ClusterManagerMetrics clusterManagerMetrics = new ClusterManagerMetrics(metricsRegistry); + final LeaderChecker leaderChecker = new LeaderChecker(settings, clusterSettings, transportService, e -> { assertThat(e.getMessage(), anyOf(endsWith("disconnected"), endsWith("disconnected during check"))); assertTrue(leaderFailed.compareAndSet(false, true)); - }, () -> new StatusInfo(StatusInfo.Status.HEALTHY, "healthy-info")); + }, () -> new StatusInfo(StatusInfo.Status.HEALTHY, "healthy-info"), clusterManagerMetrics); leaderChecker.updateLeader(leader); { @@ -351,6 +359,7 @@ public String toString() { deterministicTaskQueue.runAllRunnableTasks(); assertTrue(leaderFailed.get()); } + assertEquals(Integer.valueOf(3), metricsRegistry.getCounterStore().get("leader.checker.failure.count").getCounterValue()); } public void testFollowerFailsImmediatelyOnHealthCheckFailure() { @@ -407,10 +416,12 @@ public String toString() { transportService.acceptIncomingRequests(); final AtomicBoolean leaderFailed = new AtomicBoolean(); + TestInMemoryMetricsRegistry metricsRegistry = new TestInMemoryMetricsRegistry(); + final ClusterManagerMetrics clusterManagerMetrics = new ClusterManagerMetrics(metricsRegistry); final LeaderChecker leaderChecker = new LeaderChecker(settings, clusterSettings, transportService, e -> { assertThat(e.getMessage(), endsWith("failed health checks")); assertTrue(leaderFailed.compareAndSet(false, true)); - }, () -> new StatusInfo(StatusInfo.Status.HEALTHY, "healthy-info")); + }, () -> new StatusInfo(StatusInfo.Status.HEALTHY, "healthy-info"), clusterManagerMetrics); leaderChecker.updateLeader(leader); @@ -430,6 +441,8 @@ public String toString() { assertTrue(leaderFailed.get()); } + + assertEquals(Integer.valueOf(1), metricsRegistry.getCounterStore().get("leader.checker.failure.count").getCounterValue()); } public void testLeaderBehaviour() { @@ -453,12 +466,15 @@ public void testLeaderBehaviour() { transportService.start(); transportService.acceptIncomingRequests(); + final TestInMemoryMetricsRegistry metricsRegistry = new TestInMemoryMetricsRegistry(); + final ClusterManagerMetrics clusterManagerMetrics = new ClusterManagerMetrics(metricsRegistry); final LeaderChecker leaderChecker = new LeaderChecker( settings, clusterSettings, transportService, e -> fail("shouldn't be checking anything"), - () -> 
nodeHealthServiceStatus.get() + () -> nodeHealthServiceStatus.get(), + clusterManagerMetrics ); final DiscoveryNodes discoveryNodes = DiscoveryNodes.builder() @@ -523,6 +539,7 @@ public void testLeaderBehaviour() { equalTo("rejecting leader check from [" + otherNode + "] sent to a node that is no longer the cluster-manager") ); } + assertEquals(Integer.valueOf(0), metricsRegistry.getCounterStore().get("leader.checker.failure.count").getCounterValue()); } private class CapturingTransportResponseHandler implements TransportResponseHandler { diff --git a/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java b/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java index 10d5dceb74f55..f84f0326f4a9d 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java @@ -273,7 +273,8 @@ protected void onSendRequest( ElectionStrategy.DEFAULT_INSTANCE, nodeHealthService, persistedStateRegistry, - Mockito.mock(RemoteStoreNodeService.class) + Mockito.mock(RemoteStoreNodeService.class), + new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE) ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java index bde8a45359814..652633e689b93 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java @@ -69,7 +69,6 @@ import org.opensearch.common.settings.Settings; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; -import org.opensearch.index.store.remote.filecache.FileCache; import org.opensearch.index.store.remote.filecache.FileCacheStats; import org.opensearch.repositories.IndexId; import org.opensearch.snapshots.EmptySnapshotsInfoService; @@ -95,6 +94,7 @@ import static org.opensearch.cluster.routing.ShardRoutingState.STARTED; import static org.opensearch.cluster.routing.ShardRoutingState.UNASSIGNED; import static org.opensearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING; +import static org.opensearch.index.store.remote.filecache.FileCacheSettings.DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -381,6 +381,7 @@ public void testFileCacheRemoteShardsDecisions() { .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true) .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "60%") .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "70%") + .put(DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING.getKey(), 5) .build(); // We have an index with 2 primary shards each taking 40 bytes. 
Each node has 100 bytes available @@ -406,7 +407,6 @@ public void testFileCacheRemoteShardsDecisions() { DiskThresholdDecider diskThresholdDecider = makeDecider(diskSettings); Metadata metadata = Metadata.builder() .put(IndexMetadata.builder("test").settings(remoteIndexSettings(Version.CURRENT)).numberOfShards(2).numberOfReplicas(0)) - .persistentSettings(Settings.builder().put(FileCache.DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING.getKey(), 5).build()) .build(); RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metadata.index("test")).build(); diff --git a/server/src/test/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceTests.java b/server/src/test/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceTests.java new file mode 100644 index 0000000000000..9a9cbfa153259 --- /dev/null +++ b/server/src/test/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceTests.java @@ -0,0 +1,77 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.routing.remote; + +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.repositories.FilterRepository; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.RepositoryMissingException; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.After; +import org.junit.Before; + +import java.util.function.Supplier; + +import static org.opensearch.common.util.FeatureFlags.REMOTE_PUBLICATION_EXPERIMENTAL; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class RemoteRoutingTableServiceTests extends OpenSearchTestCase { + + private RemoteRoutingTableService remoteRoutingTableService; + private Supplier repositoriesServiceSupplier; + private RepositoriesService repositoriesService; + private BlobStoreRepository blobStoreRepository; + + @Before + public void setup() { + repositoriesServiceSupplier = mock(Supplier.class); + repositoriesService = mock(RepositoriesService.class); + when(repositoriesServiceSupplier.get()).thenReturn(repositoriesService); + + Settings settings = Settings.builder() + .put("node.attr." 
+ REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY, "routing_repository") + .build(); + + blobStoreRepository = mock(BlobStoreRepository.class); + when(repositoriesService.repository("routing_repository")).thenReturn(blobStoreRepository); + + Settings nodeSettings = Settings.builder().put(REMOTE_PUBLICATION_EXPERIMENTAL, "true").build(); + FeatureFlags.initializeFeatureFlags(nodeSettings); + + remoteRoutingTableService = new RemoteRoutingTableService(repositoriesServiceSupplier, settings); + } + + @After + public void teardown() throws Exception { + super.tearDown(); + remoteRoutingTableService.close(); + } + + public void testFailInitializationWhenRemoteRoutingDisabled() { + final Settings settings = Settings.builder().build(); + assertThrows(AssertionError.class, () -> new RemoteRoutingTableService(repositoriesServiceSupplier, settings)); + } + + public void testFailStartWhenRepositoryNotSet() { + doThrow(new RepositoryMissingException("repository missing")).when(repositoriesService).repository("routing_repository"); + assertThrows(RepositoryMissingException.class, () -> remoteRoutingTableService.start()); + } + + public void testFailStartWhenNotBlobRepository() { + final FilterRepository filterRepository = mock(FilterRepository.class); + when(repositoriesService.repository("routing_repository")).thenReturn(filterRepository); + assertThrows(AssertionError.class, () -> remoteRoutingTableService.start()); + } + +} diff --git a/server/src/test/java/org/opensearch/discovery/DiscoveryModuleTests.java b/server/src/test/java/org/opensearch/discovery/DiscoveryModuleTests.java index b33ebf8333b36..5539b3237c2bf 100644 --- a/server/src/test/java/org/opensearch/discovery/DiscoveryModuleTests.java +++ b/server/src/test/java/org/opensearch/discovery/DiscoveryModuleTests.java @@ -32,6 +32,7 @@ package org.opensearch.discovery; import org.opensearch.Version; +import org.opensearch.cluster.ClusterManagerMetrics; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.coordination.Coordinator; import org.opensearch.cluster.coordination.PersistedStateRegistry; @@ -48,6 +49,7 @@ import org.opensearch.gateway.GatewayMetaState; import org.opensearch.node.remotestore.RemoteStoreNodeService; import org.opensearch.plugins.DiscoveryPlugin; +import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransportService; @@ -128,7 +130,8 @@ private DiscoveryModule newModule(Settings settings, List plugi mock(RerouteService.class), null, new PersistedStateRegistry(), - remoteStoreNodeService + remoteStoreNodeService, + new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE) ); } diff --git a/server/src/test/java/org/opensearch/gateway/remote/ClusterMetadataManifestTests.java b/server/src/test/java/org/opensearch/gateway/remote/ClusterMetadataManifestTests.java index 0b3cd49140939..d1f559eb75f85 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/ClusterMetadataManifestTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/ClusterMetadataManifestTests.java @@ -99,7 +99,7 @@ public void testClusterMetadataManifestXContent() throws IOException { Version.CURRENT, "test-node-id", false, - ClusterMetadataManifest.CODEC_V2, + ClusterMetadataManifest.CODEC_V3, null, Collections.singletonList(uploadedIndexMetadata), "prev-cluster-uuid", @@ -123,7 +123,9 @@ public void testClusterMetadataManifestXContent() throws IOException { 
"custom--weighted_routing_netadata-file" ) ) - ).stream().collect(Collectors.toMap(UploadedMetadataAttribute::getAttributeName, Function.identity())) + ).stream().collect(Collectors.toMap(UploadedMetadataAttribute::getAttributeName, Function.identity())), + 1L, + randomUploadedIndexMetadataList() ); final XContentBuilder builder = JsonXContent.contentBuilder(); builder.startObject(); @@ -169,7 +171,9 @@ public void testClusterMetadataManifestSerializationEqualsHashCode() { "custom--weighted_routing_netadata-file" ) ) - ).stream().collect(Collectors.toMap(UploadedMetadataAttribute::getAttributeName, Function.identity())) + ).stream().collect(Collectors.toMap(UploadedMetadataAttribute::getAttributeName, Function.identity())), + 1L, + randomUploadedIndexMetadataList() ); { // Mutate Cluster Term EqualsHashCodeTestUtils.checkEqualsAndHashCode( @@ -309,6 +313,106 @@ public void testClusterMetadataManifestSerializationEqualsHashCode() { } } + public void testClusterMetadataManifestXContentV2() throws IOException { + UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "test-uuid", "/test/upload/path"); + UploadedMetadataAttribute uploadedMetadataAttribute = new UploadedMetadataAttribute("attribute_name", "testing_attribute"); + ClusterMetadataManifest originalManifest = new ClusterMetadataManifest( + 1L, + 1L, + "test-cluster-uuid", + "test-state-uuid", + Version.CURRENT, + "test-node-id", + false, + ClusterMetadataManifest.CODEC_V2, + null, + Collections.singletonList(uploadedIndexMetadata), + "prev-cluster-uuid", + true, + uploadedMetadataAttribute, + uploadedMetadataAttribute, + uploadedMetadataAttribute, + Collections.unmodifiableList( + Arrays.asList( + new UploadedMetadataAttribute( + RemoteClusterStateService.CUSTOM_METADATA + RemoteClusterStateService.CUSTOM_DELIMITER + RepositoriesMetadata.TYPE, + "custom--repositories-file" + ), + new UploadedMetadataAttribute( + RemoteClusterStateService.CUSTOM_METADATA + RemoteClusterStateService.CUSTOM_DELIMITER + IndexGraveyard.TYPE, + "custom--index_graveyard-file" + ), + new UploadedMetadataAttribute( + RemoteClusterStateService.CUSTOM_METADATA + RemoteClusterStateService.CUSTOM_DELIMITER + + WeightedRoutingMetadata.TYPE, + "custom--weighted_routing_netadata-file" + ) + ) + ).stream().collect(Collectors.toMap(UploadedMetadataAttribute::getAttributeName, Function.identity())), + 0, + new ArrayList<>() + ); + final XContentBuilder builder = JsonXContent.contentBuilder(); + builder.startObject(); + originalManifest.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + + try (XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder))) { + final ClusterMetadataManifest fromXContentManifest = ClusterMetadataManifest.fromXContentV2(parser); + assertEquals(originalManifest, fromXContentManifest); + } + } + + public void testClusterMetadataManifestXContentV3() throws IOException { + UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "test-uuid", "/test/upload/path"); + UploadedMetadataAttribute uploadedMetadataAttribute = new UploadedMetadataAttribute("attribute_name", "testing_attribute"); + ClusterMetadataManifest originalManifest = new ClusterMetadataManifest( + 1L, + 1L, + "test-cluster-uuid", + "test-state-uuid", + Version.CURRENT, + "test-node-id", + false, + ClusterMetadataManifest.CODEC_V3, + null, + Collections.singletonList(uploadedIndexMetadata), + "prev-cluster-uuid", + true, + uploadedMetadataAttribute, + 
uploadedMetadataAttribute, + uploadedMetadataAttribute, + Collections.unmodifiableList( + Arrays.asList( + new UploadedMetadataAttribute( + RemoteClusterStateService.CUSTOM_METADATA + RemoteClusterStateService.CUSTOM_DELIMITER + RepositoriesMetadata.TYPE, + "custom--repositories-file" + ), + new UploadedMetadataAttribute( + RemoteClusterStateService.CUSTOM_METADATA + RemoteClusterStateService.CUSTOM_DELIMITER + IndexGraveyard.TYPE, + "custom--index_graveyard-file" + ), + new UploadedMetadataAttribute( + RemoteClusterStateService.CUSTOM_METADATA + RemoteClusterStateService.CUSTOM_DELIMITER + + WeightedRoutingMetadata.TYPE, + "custom--weighted_routing_netadata-file" + ) + ) + ).stream().collect(Collectors.toMap(UploadedMetadataAttribute::getAttributeName, Function.identity())), + 1L, + Collections.singletonList(uploadedIndexMetadata) + ); + final XContentBuilder builder = JsonXContent.contentBuilder(); + builder.startObject(); + originalManifest.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + + try (XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder))) { + final ClusterMetadataManifest fromXContentManifest = ClusterMetadataManifest.fromXContent(parser); + assertEquals(originalManifest, fromXContentManifest); + } + } + private List randomUploadedIndexMetadataList() { final int size = randomIntBetween(1, 10); final List uploadedIndexMetadataList = new ArrayList<>(size); diff --git a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java index 5f0c371a3137e..4a53770c76d88 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java @@ -34,6 +34,7 @@ import org.opensearch.common.network.NetworkModule; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.ParseField; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.bytes.BytesArray; @@ -84,6 +85,7 @@ import org.mockito.ArgumentMatchers; import static java.util.stream.Collectors.toList; +import static org.opensearch.common.util.FeatureFlags.REMOTE_PUBLICATION_EXPERIMENTAL; import static org.opensearch.gateway.remote.RemoteClusterStateService.COORDINATION_METADATA; import static org.opensearch.gateway.remote.RemoteClusterStateService.DELIMITER; import static org.opensearch.gateway.remote.RemoteClusterStateService.FORMAT_PARAMS; @@ -96,6 +98,7 @@ import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; @@ -545,7 +548,7 @@ private void verifyWriteIncrementalGlobalMetadataFromOlderCodecSuccess(ClusterMe ); final ClusterMetadataManifest expectedManifest = ClusterMetadataManifest.builder() - .codecVersion(2) + 
.codecVersion(3) .indices(Collections.emptyList()) .clusterTerm(1L) .stateVersion(1L) @@ -1069,6 +1072,8 @@ public void testReadGlobalMetadata() throws IOException { .nodeId("nodeA") .opensearchVersion(VersionUtils.randomOpenSearchVersion(random())) .previousClusterUUID("prev-cluster-uuid") + .routingTableVersion(1) + .indicesRouting(List.of()) .build(); Metadata expactedMetadata = Metadata.builder().persistentSettings(Settings.builder().put("readonly", true).build()).build(); @@ -1378,6 +1383,33 @@ public void testGlobalMetadataUploadWaitTimeSetting() { assertEquals(globalMetadataUploadTimeout, remoteClusterStateService.getGlobalMetadataUploadTimeout().seconds()); } + public void testRemoteRoutingTableNotInitializedWhenDisabled() { + assertFalse(remoteClusterStateService.getRemoteRoutingTableService().isPresent()); + } + + public void testRemoteRoutingTableInitializedWhenEnabled() { + Settings newSettings = Settings.builder() + .put("node.attr." + REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY, "routing_repository") + .put("node.attr." + REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, "remote_store_repository") + .put(RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true) + .build(); + clusterSettings.applySettings(newSettings); + + Settings nodeSettings = Settings.builder().put(REMOTE_PUBLICATION_EXPERIMENTAL, "true").build(); + FeatureFlags.initializeFeatureFlags(nodeSettings); + + remoteClusterStateService = new RemoteClusterStateService( + "test-node-id", + repositoriesServiceSupplier, + newSettings, + clusterService, + () -> 0L, + threadPool, + List.of(new RemoteIndexPathUploader(threadPool, newSettings, repositoriesServiceSupplier, clusterSettings)) + ); + assertTrue(remoteClusterStateService.getRemoteRoutingTableService().isPresent()); + } + private void mockObjectsForGettingPreviousClusterUUID(Map clusterUUIDsPointers) throws IOException { mockObjectsForGettingPreviousClusterUUID(clusterUUIDsPointers, false, Collections.emptyMap()); } @@ -1439,7 +1471,7 @@ private void mockObjectsForGettingPreviousClusterUUID( .build(); Map indexMetadataMap1 = Map.of("index-uuid1", indexMetadata1, "index-uuid2", indexMetadata2); mockBlobContainerForGlobalMetadata(blobContainer1, clusterManifest1, metadata1); - mockBlobContainer(blobContainer1, clusterManifest1, indexMetadataMap1, ClusterMetadataManifest.CODEC_V2); + mockBlobContainer(blobContainer1, clusterManifest1, indexMetadataMap1, ClusterMetadataManifest.CODEC_V3); List uploadedIndexMetadataList2 = List.of( new UploadedIndexMetadata("index1", "index-uuid1", "key1"), @@ -1471,7 +1503,7 @@ private void mockObjectsForGettingPreviousClusterUUID( .build(); Map indexMetadataMap2 = Map.of("index-uuid1", indexMetadata3, "index-uuid2", indexMetadata4); mockBlobContainerForGlobalMetadata(blobContainer2, clusterManifest2, metadata2); - mockBlobContainer(blobContainer2, clusterManifest2, indexMetadataMap2, ClusterMetadataManifest.CODEC_V2); + mockBlobContainer(blobContainer2, clusterManifest2, indexMetadataMap2, ClusterMetadataManifest.CODEC_V3); // differGlobalMetadata controls which one of IndexMetadata or Metadata object would be different // when comparing cluster-uuid3 and cluster-uuid1 state. 
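
The CODEC_V2 to CODEC_V3 bumps threaded through these mocks track the manifest change above: V3 adds routingTableVersion and indicesRouting. All of the manifest tests in this section lean on the same serialize, reparse, and compare round trip; condensed, and with the elided builder fields assumed to be filled in exactly as in the mocks, the pattern is:

    // Round-trip a codec-versioned manifest: write JSON, parse it back, expect equality.
    ClusterMetadataManifest manifest = ClusterMetadataManifest.builder()
        .codecVersion(ClusterMetadataManifest.CODEC_V3)
        .routingTableVersion(1) // new in V3
        .indicesRouting(List.of()) // new in V3
        .clusterTerm(1L)
        .stateVersion(1L)
        // ... remaining fields as in the mocks above ...
        .build();
    final XContentBuilder builder = JsonXContent.contentBuilder();
    builder.startObject();
    manifest.toXContent(builder, ToXContent.EMPTY_PARAMS);
    builder.endObject();
    try (XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder))) {
        assertEquals(manifest, ClusterMetadataManifest.fromXContent(parser));
    }
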
@@ -1505,7 +1537,7 @@ private void mockObjectsForGettingPreviousClusterUUID( clusterUUIDCommitted.getOrDefault("cluster-uuid3", true) ); mockBlobContainerForGlobalMetadata(blobContainer3, clusterManifest3, metadata3); - mockBlobContainer(blobContainer3, clusterManifest3, indexMetadataMap3, ClusterMetadataManifest.CODEC_V2); + mockBlobContainer(blobContainer3, clusterManifest3, indexMetadataMap3, ClusterMetadataManifest.CODEC_V3); ArrayList mockBlobContainerOrderedList = new ArrayList<>( List.of(blobContainer1, blobContainer1, blobContainer3, blobContainer3, blobContainer2, blobContainer2) diff --git a/server/src/test/java/org/opensearch/gateway/remote/routingtable/IndexRoutingTableHeaderTests.java b/server/src/test/java/org/opensearch/gateway/remote/routingtable/IndexRoutingTableHeaderTests.java new file mode 100644 index 0000000000000..a3f0ac36a40f1 --- /dev/null +++ b/server/src/test/java/org/opensearch/gateway/remote/routingtable/IndexRoutingTableHeaderTests.java @@ -0,0 +1,32 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.gateway.remote.routingtable; + +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.io.stream.BytesStreamInput; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; + +public class IndexRoutingTableHeaderTests extends OpenSearchTestCase { + + public void testIndexRoutingTableHeader() throws IOException { + String indexName = randomAlphaOfLength(randomIntBetween(1, 50)); + IndexRoutingTableHeader header = new IndexRoutingTableHeader(indexName); + try (BytesStreamOutput out = new BytesStreamOutput()) { + header.writeTo(out); + + BytesStreamInput in = new BytesStreamInput(out.bytes().toBytesRef().bytes); + IndexRoutingTableHeader headerRead = new IndexRoutingTableHeader(in); + assertEquals(indexName, headerRead.getIndexName()); + + } + } + +} diff --git a/server/src/test/java/org/opensearch/gateway/remote/routingtable/RemoteIndexRoutingTableTests.java b/server/src/test/java/org/opensearch/gateway/remote/routingtable/RemoteIndexRoutingTableTests.java new file mode 100644 index 0000000000000..72066d8afb45b --- /dev/null +++ b/server/src/test/java/org/opensearch/gateway/remote/routingtable/RemoteIndexRoutingTableTests.java @@ -0,0 +1,87 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.gateway.remote.routingtable; + +import org.opensearch.Version; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.routing.IndexRoutingTable; +import org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.cluster.routing.ShardRoutingState; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.concurrent.atomic.AtomicInteger; + +public class RemoteIndexRoutingTableTests extends OpenSearchTestCase { + + public void testRoutingTableInput() { + String indexName = randomAlphaOfLength(randomIntBetween(1, 50)); + int numberOfShards = randomIntBetween(1, 10); + int numberOfReplicas = randomIntBetween(1, 10); + Metadata metadata = Metadata.builder() + .put( + IndexMetadata.builder(indexName) + .settings(settings(Version.CURRENT)) + .numberOfShards(numberOfShards) + .numberOfReplicas(numberOfReplicas) + ) + .build(); + + RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metadata.index(indexName)).build(); + + initialRoutingTable.getIndicesRouting().values().forEach(indexShardRoutingTables -> { + RemoteIndexRoutingTable indexRouting = new RemoteIndexRoutingTable(indexShardRoutingTables); + try (BytesStreamOutput streamOutput = new BytesStreamOutput();) { + indexRouting.writeTo(streamOutput); + RemoteIndexRoutingTable remoteIndexRoutingTable = new RemoteIndexRoutingTable( + streamOutput.bytes().streamInput(), + metadata.index(indexName).getIndex() + ); + IndexRoutingTable indexRoutingTable = remoteIndexRoutingTable.getIndexRoutingTable(); + assertEquals(numberOfShards, indexRoutingTable.getShards().size()); + assertEquals(metadata.index(indexName).getIndex(), indexRoutingTable.getIndex()); + assertEquals( + numberOfShards * (1 + numberOfReplicas), + indexRoutingTable.shardsWithState(ShardRoutingState.UNASSIGNED).size() + ); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + } + + public void testRoutingTableInputStreamWithInvalidIndex() { + Metadata metadata = Metadata.builder() + .put(IndexMetadata.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1)) + .put(IndexMetadata.builder("invalid-index").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1)) + .build(); + + RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metadata.index("test")).build(); + AtomicInteger assertionError = new AtomicInteger(); + initialRoutingTable.getIndicesRouting().values().forEach(indexShardRoutingTables -> { + RemoteIndexRoutingTable indexRouting = new RemoteIndexRoutingTable(indexShardRoutingTables); + try (BytesStreamOutput streamOutput = new BytesStreamOutput()) { + indexRouting.writeTo(streamOutput); + RemoteIndexRoutingTable remoteIndexRoutingTable = new RemoteIndexRoutingTable( + streamOutput.bytes().streamInput(), + metadata.index("invalid-index").getIndex() + ); + } catch (AssertionError e) { + assertionError.getAndIncrement(); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + + assertEquals(1, assertionError.get()); + } + +} diff --git a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java index 54a562642d4ab..08a24a74fdc89 100644 --- a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java +++ 
b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java @@ -1872,7 +1872,7 @@ public void testForceMergeWithSoftDeletesRetentionAndRecoverySource() throws Exc try ( Store store = createStore(); InternalEngine engine = createEngine( - config(indexSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get) + config(indexSettings, store, createTempDir(), newMergePolicy(random(), false), null, null, globalCheckpoint::get) ) ) { int numDocs = scaledRandomIntBetween(10, 100); diff --git a/server/src/test/java/org/opensearch/index/mapper/DateFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/DateFieldMapperTests.java index 2aa310ae959d9..98bcaa3a1a46b 100644 --- a/server/src/test/java/org/opensearch/index/mapper/DateFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/DateFieldMapperTests.java @@ -208,7 +208,7 @@ public void testChangeLocale() throws IOException { fieldMapping(b -> b.field("type", "date").field("format", "E, d MMM yyyy HH:mm:ss Z").field("locale", "de")) ); - mapper.parse(source(b -> b.field("field", "Mi, 06 Dez 2000 02:55:00 -0800"))); + mapper.parse(source(b -> b.field("field", "Mi., 06 Dez. 2000 02:55:00 -0800"))); } public void testNullValue() throws IOException { diff --git a/server/src/test/java/org/opensearch/index/mapper/FieldTypeInferenceTests.java b/server/src/test/java/org/opensearch/index/mapper/FieldTypeInferenceTests.java new file mode 100644 index 0000000000000..807d0e96ce5e3 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/mapper/FieldTypeInferenceTests.java @@ -0,0 +1,210 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.mapper; + +import org.apache.lucene.document.Document; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.store.Directory; +import org.opensearch.common.lucene.Lucene; +import org.opensearch.core.index.Index; +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.search.lookup.SourceLookup; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.mockito.Mockito.when; + +public class FieldTypeInferenceTests extends MapperServiceTestCase { + + private static final Map> documentMap; + static { + List listWithNull = new ArrayList<>(); + listWithNull.add(null); + documentMap = new HashMap<>(); + documentMap.put("text_field", List.of("The quick brown fox jumps over the lazy dog.")); + documentMap.put("int_field", List.of(789)); + documentMap.put("float_field", List.of(123.45)); + documentMap.put("date_field_1", List.of("2024-05-12T15:45:00Z")); + documentMap.put("date_field_2", List.of("2024-05-12")); + documentMap.put("boolean_field", List.of(true)); + documentMap.put("null_field", listWithNull); + documentMap.put("array_field_int", List.of(100, 200, 300, 400, 500)); + documentMap.put("array_field_text", List.of("100", "200")); + documentMap.put("object_type", List.of(Map.of("foo", Map.of("bar", 10)))); + } + + public void testJsonSupportedTypes() throws IOException { + MapperService mapperService = createMapperService(topMapping(b -> {})); + QueryShardContext queryShardContext = createQueryShardContext(mapperService); + when(queryShardContext.index()).thenReturn(new Index("test_index", "uuid")); + int totalDocs = 10000; + int docsPerLeafCount = 1000; + try (Directory dir = newDirectory()) { + IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER)); + Document d = new Document(); + for (int i = 0; i < totalDocs; i++) { + iw.addDocument(d); + if ((i + 1) % docsPerLeafCount == 0) { + iw.commit(); + } + } + try (IndexReader reader = DirectoryReader.open(iw)) { + iw.close(); + FieldTypeInference typeInference = new FieldTypeInference("test_index", queryShardContext.getMapperService(), reader); + String[] fieldName = { "text_field" }; + Mapper mapper = typeInference.infer(lookup -> documentMap.get(fieldName[0])); + assertEquals("text", mapper.typeName()); + + fieldName[0] = "int_field"; + mapper = typeInference.infer(lookup -> documentMap.get(fieldName[0])); + assertEquals("long", mapper.typeName()); + + fieldName[0] = "float_field"; + mapper = typeInference.infer(lookup -> documentMap.get(fieldName[0])); + assertEquals("float", mapper.typeName()); + + fieldName[0] = "date_field_1"; + mapper = typeInference.infer(lookup -> documentMap.get(fieldName[0])); + assertEquals("date", mapper.typeName()); + + fieldName[0] = "date_field_2"; + mapper = typeInference.infer(lookup -> documentMap.get(fieldName[0])); + assertEquals("date", mapper.typeName()); + + fieldName[0] = "boolean_field"; + mapper = typeInference.infer(lookup -> documentMap.get(fieldName[0])); + assertEquals("boolean", mapper.typeName()); + + fieldName[0] = "array_field_int"; + mapper = typeInference.infer(lookup -> documentMap.get(fieldName[0])); + assertEquals("long", mapper.typeName()); + + fieldName[0] = "array_field_text"; + mapper = 
typeInference.infer(lookup -> documentMap.get(fieldName[0])); + assertEquals("text", mapper.typeName()); + + fieldName[0] = "object_type"; + mapper = typeInference.infer(lookup -> documentMap.get(fieldName[0])); + assertEquals("object", mapper.typeName()); + + fieldName[0] = "null_field"; + mapper = typeInference.infer(lookup -> documentMap.get(fieldName[0])); + assertNull(mapper); + + // If field is missing ensure that sample docIDs generated for inference are ordered and are in bounds + fieldName[0] = "missing_field"; + List> docsEvaluated = new ArrayList<>(); + int[] totalDocsEvaluated = { 0 }; + typeInference.setSampleSize(50); + mapper = typeInference.infer(new ValueFetcher() { + @Override + public List fetchValues(SourceLookup lookup) throws IOException { + docsEvaluated.get(docsEvaluated.size() - 1).add(lookup.docId()); + totalDocsEvaluated[0]++; + return documentMap.get(fieldName[0]); + } + + @Override + public void setNextReader(LeafReaderContext leafReaderContext) { + docsEvaluated.add(new ArrayList<>()); + } + }); + assertNull(mapper); + assertEquals(typeInference.getSampleSize(), totalDocsEvaluated[0]); + for (List docsPerLeaf : docsEvaluated) { + for (int j = 0; j < docsPerLeaf.size() - 1; j++) { + assertTrue(docsPerLeaf.get(j) < docsPerLeaf.get(j + 1)); + } + if (!docsPerLeaf.isEmpty()) { + assertTrue(docsPerLeaf.get(0) >= 0 && docsPerLeaf.get(docsPerLeaf.size() - 1) < docsPerLeafCount); + } + } + } + } + } + + public void testDeleteAllDocs() throws IOException { + MapperService mapperService = createMapperService(topMapping(b -> {})); + QueryShardContext queryShardContext = createQueryShardContext(mapperService); + when(queryShardContext.index()).thenReturn(new Index("test_index", "uuid")); + int totalDocs = 10000; + int docsPerLeafCount = 1000; + try (Directory dir = newDirectory()) { + IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER)); + Document d = new Document(); + for (int i = 0; i < totalDocs; i++) { + iw.addDocument(d); + if ((i + 1) % docsPerLeafCount == 0) { + iw.commit(); + } + } + iw.deleteAll(); + iw.commit(); + + try (IndexReader reader = DirectoryReader.open(iw)) { + iw.close(); + FieldTypeInference typeInference = new FieldTypeInference("test_index", queryShardContext.getMapperService(), reader); + String[] fieldName = { "text_field" }; + Mapper mapper = typeInference.infer(lookup -> documentMap.get(fieldName[0])); + assertNull(mapper); + } + } + } + + public void testZeroDoc() throws IOException { + MapperService mapperService = createMapperService(topMapping(b -> {})); + QueryShardContext queryShardContext = createQueryShardContext(mapperService); + when(queryShardContext.index()).thenReturn(new Index("test_index", "uuid")); + try (Directory dir = newDirectory()) { + IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER)); + try (IndexReader reader = DirectoryReader.open(iw)) { + iw.close(); + FieldTypeInference typeInference = new FieldTypeInference("test_index", queryShardContext.getMapperService(), reader); + String[] fieldName = { "text_field" }; + Mapper mapper = typeInference.infer(lookup -> documentMap.get(fieldName[0])); + assertNull(mapper); + } + } + } + + public void testSampleGeneration() throws IOException { + MapperService mapperService = createMapperService(topMapping(b -> {})); + QueryShardContext queryShardContext = createQueryShardContext(mapperService); + when(queryShardContext.index()).thenReturn(new Index("test_index", "uuid")); + int totalDocs = 10000; + int 
docsPerLeafCount = 1000; + try (Directory dir = newDirectory()) { + IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER)); + Document d = new Document(); + for (int i = 0; i < totalDocs; i++) { + iw.addDocument(d); + if ((i + 1) % docsPerLeafCount == 0) { + iw.commit(); + } + } + try (IndexReader reader = DirectoryReader.open(iw)) { + iw.close(); + FieldTypeInference typeInference = new FieldTypeInference("test_index", queryShardContext.getMapperService(), reader); + typeInference.setSampleSize(1000 - 1); + typeInference.infer(lookup -> documentMap.get("unknown_field")); + assertThrows(IllegalArgumentException.class, () -> typeInference.setSampleSize(1000 + 1)); + typeInference.setSampleSize(1000); + typeInference.infer(lookup -> documentMap.get("unknown_field")); + } + } + } +} diff --git a/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java index 096acd23429bd..f72bd76913c8f 100644 --- a/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java @@ -322,8 +322,10 @@ public void testParentFilterFromInlineLeafInnerHitsNestedQuery() throws Exceptio when(searchContext.mapperService()).thenReturn(mapperService); InnerHitBuilder leafInnerHits = randomNestedInnerHits(); + // Set null for values not related with this test case leafInnerHits.setScriptFields(null); leafInnerHits.setHighlightBuilder(null); + leafInnerHits.setSorts(null); QueryBuilder innerQueryBuilder = spy(new MatchAllQueryBuilder()); when(innerQueryBuilder.toQuery(queryShardContext)).thenAnswer(invoke -> { diff --git a/server/src/test/java/org/opensearch/node/RemoteStoreNodeAttributeTests.java b/server/src/test/java/org/opensearch/node/RemoteStoreNodeAttributeTests.java index c4ba271d27ae9..de7f8977686a7 100644 --- a/server/src/test/java/org/opensearch/node/RemoteStoreNodeAttributeTests.java +++ b/server/src/test/java/org/opensearch/node/RemoteStoreNodeAttributeTests.java @@ -19,6 +19,7 @@ import java.net.InetAddress; import java.net.UnknownHostException; +import java.util.List; import java.util.Locale; import java.util.Map; @@ -28,6 +29,7 @@ import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_CRYPTO_SETTINGS_PREFIX; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; @@ -148,4 +150,77 @@ public void testNoCryptoMetadata() throws UnknownHostException { RepositoryMetadata repositoryMetadata = remoteStoreNodeAttribute.getRepositoriesMetadata().repositories().get(0); assertNull(repositoryMetadata.cryptoMetadata()); } + + public void testEqualsWithRepoSkip() throws UnknownHostException { + String repoName = "remote-store-A"; + String repoTypeSettingKey = String.format(Locale.ROOT, REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, repoName); + String repoSettingsKey = String.format(Locale.ROOT, 
REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, repoName); + Map attr = Map.of( + REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, + repoName, + REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, + repoName, + REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, + repoName, + repoTypeSettingKey, + "s3", + repoSettingsKey, + "abc", + repoSettingsKey + "base_path", + "xyz" + ); + DiscoveryNode node = new DiscoveryNode( + "C", + new TransportAddress(InetAddress.getByName("localhost"), 9876), + attr, + emptySet(), + Version.CURRENT + ); + + RemoteStoreNodeAttribute remoteStoreNodeAttribute = new RemoteStoreNodeAttribute(node); + + String routingTableRepoName = "remote-store-B"; + String routingTableRepoTypeSettingKey = String.format( + Locale.ROOT, + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + routingTableRepoName + ); + String routingTableRepoSettingsKey = String.format( + Locale.ROOT, + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + routingTableRepoName + ); + + Map attr2 = Map.of( + REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, + repoName, + REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, + repoName, + REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, + repoName, + REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY, + routingTableRepoName, + repoTypeSettingKey, + "s3", + repoSettingsKey, + "abc", + repoSettingsKey + "base_path", + "xyz", + routingTableRepoTypeSettingKey, + "s3", + routingTableRepoSettingsKey, + "xyz" + ); + DiscoveryNode node2 = new DiscoveryNode( + "C", + new TransportAddress(InetAddress.getByName("localhost"), 9876), + attr2, + emptySet(), + Version.CURRENT + ); + RemoteStoreNodeAttribute remoteStoreNodeAttribute2 = new RemoteStoreNodeAttribute(node2); + + assertFalse(remoteStoreNodeAttribute.equalsWithRepoSkip(remoteStoreNodeAttribute2, List.of())); + assertTrue(remoteStoreNodeAttribute.equalsWithRepoSkip(remoteStoreNodeAttribute2, List.of(routingTableRepoName))); + } } diff --git a/server/src/test/java/org/opensearch/search/builder/SearchSourceBuilderTests.java b/server/src/test/java/org/opensearch/search/builder/SearchSourceBuilderTests.java index 5b1035e24185d..fd3dd8c12e84e 100644 --- a/server/src/test/java/org/opensearch/search/builder/SearchSourceBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/builder/SearchSourceBuilderTests.java @@ -357,6 +357,70 @@ public void testDerivedFieldsParsingAndSerialization() throws IOException { } + public void testDerivedFieldsParsingAndSerializationObjectType() throws IOException { + { + String restContent = "{\n" + + " \"derived\": {\n" + + " \"duration\": {\n" + + " \"type\": \"long\",\n" + + " \"script\": \"emit(doc['test'])\"\n" + + " },\n" + + " \"ip_from_message\": {\n" + + " \"type\": \"keyword\",\n" + + " \"script\": \"emit(doc['message'])\"\n" + + " },\n" + + " \"object\": {\n" + + " \"type\": \"object\",\n" + + " \"script\": \"emit(doc['test'])\",\n" + + " \"format\": \"dd-MM-yyyy\",\n" + + " \"source_indexed_field\": \"test\",\n" + + " \"ignore_malformed\": true,\n" + + " \"properties\": {\n" + + " \"sub_field\": \"text\"\n" + + " }\n" + + " }\n" + + " },\n" + + " \"query\" : {\n" + + " \"match\": { \"content\": { \"query\": \"foo bar\" }}\n" + + " }\n" + + "}"; + + String expectedContent = + "{\"query\":{\"match\":{\"content\":{\"query\":\"foo 
bar\",\"operator\":\"OR\",\"prefix_length\":0,\"max_expansions\":50,\"fuzzy_transpositions\":true,\"lenient\":false,\"zero_terms_query\":\"NONE\",\"auto_generate_synonyms_phrase_query\":true,\"boost\":1.0}}},\"derived\":{\"duration\":{\"type\":\"long\",\"script\":\"emit(doc['test'])\"},\"ip_from_message\":{\"type\":\"keyword\",\"script\":\"emit(doc['message'])\"},\"object\":{\"format\":\"dd-MM-yyyy\",\"source_indexed_field\":\"test\",\"ignore_malformed\":true,\"type\":\"object\",\"script\":\"emit(doc['test'])\",\"properties\":{\"sub_field\":\"text\"}},\"derived_field\":{\"type\":\"object\",\"script\":{\"source\":\"emit(doc['message']\",\"lang\":\"painless\"},\"properties\":{\"sub_field_2\":\"keyword\"},\"source_indexed_field\":\"message\",\"format\":\"dd-MM-yyyy\",\"ignore_malformed\":true}}}"; + + try (XContentParser parser = createParser(JsonXContent.jsonXContent, restContent)) { + SearchSourceBuilder searchSourceBuilder = SearchSourceBuilder.fromXContent(parser); + searchSourceBuilder.derivedField( + "derived_field", + "object", + new Script("emit(doc['message']"), + Map.of("sub_field_2", "keyword"), + "message", + "dd-MM-yyyy", + true + ); + searchSourceBuilder = rewrite(searchSourceBuilder); + assertEquals(3, searchSourceBuilder.getDerivedFieldsObject().size()); + assertEquals(1, searchSourceBuilder.getDerivedFields().size()); + assertEquals(1, searchSourceBuilder.getDerivedFields().get(0).getProperties().size()); + assertEquals("message", searchSourceBuilder.getDerivedFields().get(0).getSourceIndexedField()); + assertEquals("dd-MM-yyyy", searchSourceBuilder.getDerivedFields().get(0).getFormat()); + assertTrue(searchSourceBuilder.getDerivedFields().get(0).getIgnoreMalformed()); + + try (BytesStreamOutput output = new BytesStreamOutput()) { + searchSourceBuilder.writeTo(output); + try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry)) { + SearchSourceBuilder deserializedBuilder = new SearchSourceBuilder(in); + String actualContent = deserializedBuilder.toString(); + assertEquals(expectedContent, actualContent); + assertEquals(searchSourceBuilder.hashCode(), deserializedBuilder.hashCode()); + assertNotSame(searchSourceBuilder, deserializedBuilder); + } + } + } + } + } + public void testAggsParsing() throws IOException { { String restContent = "{\n" diff --git a/server/src/test/java/org/opensearch/snapshots/BlobStoreFormatTests.java b/server/src/test/java/org/opensearch/snapshots/BlobStoreFormatTests.java index c5f36fcc01983..95a8267734a07 100644 --- a/server/src/test/java/org/opensearch/snapshots/BlobStoreFormatTests.java +++ b/server/src/test/java/org/opensearch/snapshots/BlobStoreFormatTests.java @@ -49,13 +49,13 @@ import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.io.stream.BufferedChecksumStreamOutput; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.compress.CompressorRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.index.translog.BufferedChecksumStreamOutput; import org.opensearch.repositories.blobstore.ChecksumBlobStoreFormat; import org.opensearch.test.OpenSearchTestCase; diff --git 
a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 460aaa08a224d..86de008b5dee5 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -191,7 +191,6 @@ import org.opensearch.index.seqno.RetentionLeaseSyncer; import org.opensearch.index.shard.PrimaryReplicaSyncer; import org.opensearch.index.store.RemoteSegmentStoreDirectoryFactory; -import org.opensearch.index.store.remote.filecache.FileCache; import org.opensearch.index.store.remote.filecache.FileCacheStats; import org.opensearch.indices.DefaultRemoteStoreSettings; import org.opensearch.indices.IndicesModule; @@ -1681,7 +1680,6 @@ private Environment createEnvironment(String nodeName) { ClusterBootstrapService.INITIAL_CLUSTER_MANAGER_NODES_SETTING.getKey(), ClusterBootstrapService.INITIAL_CLUSTER_MANAGER_NODES_SETTING.get(Settings.EMPTY) ) - .put(FileCache.DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING.getKey(), 5) .put(MappingUpdatedAction.INDICES_MAX_IN_FLIGHT_UPDATES_SETTING.getKey(), 1000) // o.w. some tests might block .build() ); @@ -2252,7 +2250,8 @@ public void onFailure(final Exception e) { ), shardLimitValidator, indicesService, - clusterInfoService::getClusterInfo + clusterInfoService::getClusterInfo, + () -> 5.0 ); actions.put( PutMappingAction.INSTANCE, @@ -2560,7 +2559,8 @@ public void start(ClusterState initialState) { ElectionStrategy.DEFAULT_INSTANCE, () -> new StatusInfo(HEALTHY, "healthy-info"), persistedStateRegistry, - remoteStoreNodeService + remoteStoreNodeService, + new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE) ); clusterManagerService.setClusterStatePublisher(coordinator); coordinator.start(); diff --git a/server/src/test/java/org/opensearch/telemetry/TestInMemoryCounter.java b/server/src/test/java/org/opensearch/telemetry/TestInMemoryCounter.java new file mode 100644 index 0000000000000..d9aee5ebfa941 --- /dev/null +++ b/server/src/test/java/org/opensearch/telemetry/TestInMemoryCounter.java @@ -0,0 +1,52 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry; + +import org.opensearch.telemetry.metrics.Counter; +import org.opensearch.telemetry.metrics.tags.Tags; + +import java.util.HashMap; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicInteger; + +/** + * A simple implementation of Counter used by TestInMemoryMetricsRegistry in unit tests. It keeps an + * atomic integer that accumulates values added without tags, along with a map that stores the values + * recorded against each combination of tags. + * The map and the atomic integer can then be read back to verify the added values. 
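+ * For example, a ClusterManagerMetrics built over this registry increments counters that a test can later + * read back: metricsRegistry.getCounterStore().get("leader.checker.failure.count").getCounterValue(), + * as the leader checker tests above do.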
+ */ +public class TestInMemoryCounter implements Counter { + + private AtomicInteger counterValue = new AtomicInteger(0); + private ConcurrentHashMap, Double> counterValueForTags = new ConcurrentHashMap<>(); + + public Integer getCounterValue() { + return this.counterValue.get(); + } + + public ConcurrentHashMap, Double> getCounterValueForTags() { + return this.counterValueForTags; + } + + @Override + public void add(double value) { + counterValue.addAndGet((int) value); + } + + @Override + public synchronized void add(double value, Tags tags) { + HashMap hashMap = (HashMap) tags.getTagsMap(); + if (counterValueForTags.get(hashMap) == null) { + counterValueForTags.put(hashMap, value); + } else { + value = counterValueForTags.get(hashMap) + value; + counterValueForTags.put(hashMap, value); + } + } +} diff --git a/server/src/test/java/org/opensearch/telemetry/TestInMemoryHistogram.java b/server/src/test/java/org/opensearch/telemetry/TestInMemoryHistogram.java new file mode 100644 index 0000000000000..ff28df2b6529d --- /dev/null +++ b/server/src/test/java/org/opensearch/telemetry/TestInMemoryHistogram.java @@ -0,0 +1,47 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry; + +import org.opensearch.telemetry.metrics.Histogram; +import org.opensearch.telemetry.metrics.tags.Tags; + +import java.util.HashMap; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicInteger; + +/** + * A simple implementation of Histogram used by TestInMemoryMetricsRegistry in unit tests. It keeps an + * atomic integer that accumulates values recorded without tags, along with a map that stores the latest + * value recorded against each combination of tags. + * The map and the atomic integer can then be read back to verify the recorded values. + */ +public class TestInMemoryHistogram implements Histogram { + + private AtomicInteger histogramValue = new AtomicInteger(0); + private ConcurrentHashMap, Double> histogramValueForTags = new ConcurrentHashMap<>(); + + public Integer getHistogramValue() { + return this.histogramValue.get(); + } + + public ConcurrentHashMap, Double> getHistogramValueForTags() { + return this.histogramValueForTags; + } + + @Override + public void record(double value) { + histogramValue.addAndGet((int) value); + } + + @Override + public synchronized void record(double value, Tags tags) { + HashMap hashMap = (HashMap) tags.getTagsMap(); + histogramValueForTags.put(hashMap, value); + } +} diff --git a/server/src/test/java/org/opensearch/telemetry/TestInMemoryMetricsRegistry.java b/server/src/test/java/org/opensearch/telemetry/TestInMemoryMetricsRegistry.java new file mode 100644 index 0000000000000..6d395085b12ea --- /dev/null +++ b/server/src/test/java/org/opensearch/telemetry/TestInMemoryMetricsRegistry.java @@ -0,0 +1,71 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.telemetry; + +import org.opensearch.telemetry.metrics.Counter; +import org.opensearch.telemetry.metrics.Histogram; +import org.opensearch.telemetry.metrics.MetricsRegistry; +import org.opensearch.telemetry.metrics.tags.Tags; + +import java.io.Closeable; +import java.io.IOException; +import java.util.concurrent.ConcurrentHashMap; +import java.util.function.Supplier; + +/** + * This is a simple implementation of MetricsRegistry which can be utilized by Unit Tests. + * It just initializes and stores counters/histograms within a map, once created. + * The maps can then be used to get the counters/histograms by their names. + */ +public class TestInMemoryMetricsRegistry implements MetricsRegistry { + + private ConcurrentHashMap counterStore = new ConcurrentHashMap<>(); + private ConcurrentHashMap histogramStore = new ConcurrentHashMap<>(); + + public ConcurrentHashMap getCounterStore() { + return this.counterStore; + } + + public ConcurrentHashMap getHistogramStore() { + return this.histogramStore; + } + + @Override + public Counter createCounter(String name, String description, String unit) { + TestInMemoryCounter counter = new TestInMemoryCounter(); + counterStore.putIfAbsent(name, counter); + return counter; + } + + @Override + public Counter createUpDownCounter(String name, String description, String unit) { + /** + * ToDo: To be implemented when required. + */ + return null; + } + + @Override + public Histogram createHistogram(String name, String description, String unit) { + TestInMemoryHistogram histogram = new TestInMemoryHistogram(); + histogramStore.putIfAbsent(name, histogram); + return histogram; + } + + @Override + public Closeable createGauge(String name, String description, String unit, Supplier valueProvider, Tags tags) { + /** + * ToDo: To be implemented when required. 
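+ * One possible shape, mirroring the counter and histogram stores: keep the Supplier per gauge name + * and sample it whenever a test inspects the gauge.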
+ */ + return null; + } + + @Override + public void close() throws IOException {} +} diff --git a/settings.gradle b/settings.gradle index f58a8b35b10eb..ca8538a967ef7 100644 --- a/settings.gradle +++ b/settings.gradle @@ -10,7 +10,7 @@ */ plugins { - id "com.gradle.enterprise" version "3.17.4" + id "com.gradle.develocity" version "3.17.4" } ext.disableBuildCache = hasProperty('DISABLE_BUILD_CACHE') || System.getenv().containsKey('DISABLE_BUILD_CACHE') diff --git a/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java b/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java index 3ae737bf63923..1c2270bab1260 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java +++ b/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java @@ -1183,7 +1183,8 @@ protected Optional getDisruptableMockTransport(Transpo getElectionStrategy(), nodeHealthService, persistedStateRegistry, - remoteStoreNodeService + remoteStoreNodeService, + new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE) ); clusterManagerService.setClusterStatePublisher(coordinator); final GatewayService gatewayService = new GatewayService( diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java index 5a3f3b5a07a8d..5ee65e7ea1a1c 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java @@ -125,7 +125,6 @@ import org.opensearch.index.analysis.TokenizerFactory; import org.opensearch.index.remote.RemoteStoreEnums; import org.opensearch.index.remote.RemoteStorePathStrategy; -import org.opensearch.index.store.remote.filecache.FileCache; import org.opensearch.indices.analysis.AnalysisModule; import org.opensearch.monitor.jvm.JvmInfo; import org.opensearch.plugins.AnalysisPlugin; @@ -188,6 +187,7 @@ import static java.util.Collections.emptyMap; import static org.opensearch.core.common.util.CollectionUtils.arrayAsArrayList; +import static org.opensearch.index.store.remote.filecache.FileCacheSettings.DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasItem; @@ -1278,7 +1278,7 @@ public static Settings.Builder settings(Version version) { public static Settings.Builder remoteIndexSettings(Version version) { Settings.Builder builder = Settings.builder() - .put(FileCache.DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING.getKey(), 5) + .put(DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING.getKey(), 5) .put(IndexMetadata.SETTING_VERSION_CREATED, version) .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.REMOTE_SNAPSHOT.getSettingsKey()); return builder; diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/Assertion.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/Assertion.java index b9cbaacdf8873..732d4291ae670 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/Assertion.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/Assertion.java @@ -37,6 +37,8 @@ import java.io.IOException; import java.util.Map; +import static org.junit.Assert.fail; + /** * Base class for executable sections that hold assertions */ @@ -79,6 +81,41 @@ protected final Object 
getActualValue(ClientYamlTestExecutionContext executionCo return executionContext.response(field); } + static Object convertActualValue(Object actualValue, Object expectedValue) { + if (actualValue == null || expectedValue.getClass().isAssignableFrom(actualValue.getClass())) { + return actualValue; + } + if (actualValue instanceof Number && expectedValue instanceof Number) { + if (expectedValue instanceof Float) { + return Float.parseFloat(actualValue.toString()); + } else if (expectedValue instanceof Double) { + return Double.parseDouble(actualValue.toString()); + } else if (expectedValue instanceof Integer) { + return Integer.parseInt(actualValue.toString()); + } else if (expectedValue instanceof Long) { + return Long.parseLong(actualValue.toString()); + } + } + // Force a class cast exception here, so developers can flesh out the above logic as needed. + try { + expectedValue.getClass().cast(actualValue); + } catch (ClassCastException e) { + fail( + "Type mismatch: Expected value (" + + expectedValue + + ") has type " + + expectedValue.getClass() + + ". " + + "Actual value (" + + actualValue + + ") has type " + + actualValue.getClass() + + "." + ); + } + return actualValue; + } + @Override public XContentLocation getLocation() { return location; diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/GreaterThanAssertion.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/GreaterThanAssertion.java index 4c2e70f37a33c..0d20dc7c326b0 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/GreaterThanAssertion.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/GreaterThanAssertion.java @@ -71,6 +71,7 @@ public GreaterThanAssertion(XContentLocation location, String field, Object expe @Override protected void doAssert(Object actualValue, Object expectedValue) { logger.trace("assert that [{}] is greater than [{}] (field: [{}])", actualValue, expectedValue, getField()); + actualValue = convertActualValue(actualValue, expectedValue); assertThat( "value of [" + getField() + "] is not comparable (got [" + safeClass(actualValue) + "])", actualValue, diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/GreaterThanEqualToAssertion.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/GreaterThanEqualToAssertion.java index 8e929eff44348..a6435c1303489 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/GreaterThanEqualToAssertion.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/GreaterThanEqualToAssertion.java @@ -72,6 +72,7 @@ public GreaterThanEqualToAssertion(XContentLocation location, String field, Obje @Override protected void doAssert(Object actualValue, Object expectedValue) { logger.trace("assert that [{}] is greater than or equal to [{}] (field: [{}])", actualValue, expectedValue, getField()); + actualValue = convertActualValue(actualValue, expectedValue); assertThat( "value of [" + getField() + "] is not comparable (got [" + safeClass(actualValue) + "])", actualValue, diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/LessThanAssertion.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/LessThanAssertion.java index d6e2ae1e23996..acffe03d34439 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/LessThanAssertion.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/LessThanAssertion.java @@ 
-72,6 +72,7 @@ public LessThanAssertion(XContentLocation location, String field, Object expecte @Override protected void doAssert(Object actualValue, Object expectedValue) { logger.trace("assert that [{}] is less than [{}] (field: [{}])", actualValue, expectedValue, getField()); + actualValue = convertActualValue(actualValue, expectedValue); assertThat( "value of [" + getField() + "] is not comparable (got [" + safeClass(actualValue) + "])", actualValue, diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/LessThanOrEqualToAssertion.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/LessThanOrEqualToAssertion.java index ee46c04496f32..d685d3e46a543 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/LessThanOrEqualToAssertion.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/LessThanOrEqualToAssertion.java @@ -72,6 +72,7 @@ public LessThanOrEqualToAssertion(XContentLocation location, String field, Objec @Override protected void doAssert(Object actualValue, Object expectedValue) { logger.trace("assert that [{}] is less than or equal to [{}] (field: [{}])", actualValue, expectedValue, getField()); + actualValue = convertActualValue(actualValue, expectedValue); assertThat( "value of [" + getField() + "] is not comparable (got [" + safeClass(actualValue) + "])", actualValue,
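
The convertActualValue hook threaded through the four range assertions above exists because a YAML REST response may deserialize a numeric field as Integer while the expected value in the test is written as a Long (or as Double where a Float is expected), and the ordering matchers compare only values of one Comparable type. The helper converts the actual value to the expected value's type before the comparison. A minimal sketch of the behavior, with hypothetical values:

    // Integer.compareTo(Long) does not exist, so greaterThan(5L) would fail on type, not value.
    Object actualValue = 10; // e.g. a count parsed out of a YAML response
    Object expectedValue = 5L; // the literal written in the test
    actualValue = convertActualValue(actualValue, expectedValue); // Integer 10 -> Long 10
    assertThat((Long) actualValue, greaterThan((Long) expectedValue)); // now comparable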