diff --git a/.ci/bwcVersions b/.ci/bwcVersions
index 398c3d52595f1..1f80ed34d6c10 100644
--- a/.ci/bwcVersions
+++ b/.ci/bwcVersions
@@ -34,3 +34,4 @@ BWC_VERSION:
   - "2.14.0"
   - "2.14.1"
   - "2.15.0"
+  - "2.16.0"
diff --git a/.github/ISSUE_TEMPLATE/failed_check.md b/.github/ISSUE_TEMPLATE/failed_check.md
deleted file mode 100644
index 71508c9f5bd43..0000000000000
--- a/.github/ISSUE_TEMPLATE/failed_check.md
+++ /dev/null
@@ -1,8 +0,0 @@
----
-title: '[AUTOCUT] Gradle Check Failure on push to {{ env.branch_name }}'
-labels: '>test-failure, bug, autocut'
----
-
-Gradle check has failed on push of your commit {{ env.pr_from_sha }}.
-Please examine the workflow log {{ env.workflow_url }}.
-Is the failure [a flaky test](https://github.com/opensearch-project/OpenSearch/blob/main/DEVELOPER_GUIDE.md#flaky-tests) unrelated to your change?
diff --git a/.github/workflows/gradle-check.yml b/.github/workflows/gradle-check.yml
index 07185ef4c65e3..2909ee95349ce 100644
--- a/.github/workflows/gradle-check.yml
+++ b/.github/workflows/gradle-check.yml
@@ -14,7 +14,7 @@ permissions:
 jobs:
   check-files:
     runs-on: ubuntu-latest
-    outputs: 
+    outputs:
       RUN_GRADLE_CHECK: ${{ steps.changed-files-specific.outputs.any_changed }}
     steps:
       - uses: actions/checkout@v4
@@ -26,7 +26,7 @@ jobs:
             release-notes/*.md
             .github/**
             *.md
-          
+
   gradle-check:
     needs: check-files
     if: github.repository == 'opensearch-project/OpenSearch' && needs.check-files.outputs.RUN_GRADLE_CHECK == 'true'
@@ -158,15 +158,6 @@ jobs:
             Please examine the workflow log, locate, and copy-paste the failure(s) below, then iterate to green.
             Is the failure [a flaky test](https://github.com/opensearch-project/OpenSearch/blob/main/DEVELOPER_GUIDE.md#flaky-tests) unrelated to your change?

-      - name: Create Issue On Push Failure
-        if: ${{ github.event_name == 'push' && failure() }}
-        uses: dblock/create-a-github-issue@v3
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          assignees: ${{ github.event.head_commit.author.username }}, ${{ github.triggering_actor }}
-          filename: .github/ISSUE_TEMPLATE/failed_check.md
-
   check-result:
     needs: [check-files, gradle-check]
     if: always()
@@ -174,4 +165,4 @@ jobs:
     steps:
       - name: Fail if gradle-check fails
         if: ${{ needs.check-files.outputs.RUN_GRADLE_CHECK && needs.gradle-check.result == 'failure' }}
-        run: exit 1
\ No newline at end of file
+        run: exit 1
diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md
index f66dfa6f52e7a..06b761b1df8bd 100644
--- a/CHANGELOG-3.0.md
+++ b/CHANGELOG-3.0.md
@@ -17,6 +17,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 ### Dependencies

 ### Changed
+- Changed locale provider from COMPAT to CLDR ([#14345](https://github.com/opensearch-project/OpenSearch/pull/14345))
 - Migrate client transports to Apache HttpClient / Core 5.x ([#4459](https://github.com/opensearch-project/OpenSearch/pull/4459))
 - Change http code on create index API with bad input raising NotXContentException from 500 to 400 ([#4773](https://github.com/opensearch-project/OpenSearch/pull/4773))
 - Improve summary error message for invalid setting updates ([#4792](https://github.com/opensearch-project/OpenSearch/pull/4792))
@@ -47,7 +48,6 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Fix 'org.apache.hc.core5.http.ParseException: Invalid protocol version' under JDK 16+ ([#4827](https://github.com/opensearch-project/OpenSearch/pull/4827))
 - Fix compression support for h2c protocol ([#4944](https://github.com/opensearch-project/OpenSearch/pull/4944))
 - Don't over-allocate in HeapBufferedAsyncEntityConsumer in order to consume the response ([#9993](https://github.com/opensearch-project/OpenSearch/pull/9993))
-- Java high-level REST client bulk() is not respecting the bulkRequest.requireAlias(true) method call ([#14146](https://github.com/opensearch-project/OpenSearch/pull/14146))

 ### Security
diff --git a/CHANGELOG.md b/CHANGELOG.md
index cee46edd988c4..347c28792b35b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,75 +5,27 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 ## [Unreleased 2.x]
 ### Added
-- Add leader and follower check failure counter metrics ([#12439](https://github.com/opensearch-project/OpenSearch/pull/12439))
-- Add latency metrics for instrumenting critical clusterManager code paths ([#12333](https://github.com/opensearch-project/OpenSearch/pull/12333))
-- Add support for Azure Managed Identity in repository-azure ([#12423](https://github.com/opensearch-project/OpenSearch/issues/12423))
-- Add useCompoundFile index setting ([#13478](https://github.com/opensearch-project/OpenSearch/pull/13478))
-- Make outbound side of transport protocol dependent ([#13293](https://github.com/opensearch-project/OpenSearch/pull/13293))
-- [Remote Store] Add dynamic cluster settings to set timeout for segments upload to Remote Store ([#13679](https://github.com/opensearch-project/OpenSearch/pull/13679))
-- [Remote Store] Upload translog checkpoint as object metadata to translog.tlog([#13637](https://github.com/opensearch-project/OpenSearch/pull/13637))
-- Add getMetadataFields to MapperService ([#13819](https://github.com/opensearch-project/OpenSearch/pull/13819))
-- [Remote State] Add async remote state deletion task running on an interval, configurable by a setting ([#13131](https://github.com/opensearch-project/OpenSearch/pull/13131))
-- Add "wildcard" field type that supports efficient wildcard, prefix, and regexp queries ([#13461](https://github.com/opensearch-project/OpenSearch/pull/13461))
-- Allow setting query parameters on requests ([#13776](https://github.com/opensearch-project/OpenSearch/issues/13776))
-- Add capability to disable source recovery_source for an index ([#13590](https://github.com/opensearch-project/OpenSearch/pull/13590))
-- Add remote routing table for remote state publication with experimental feature flag ([#13304](https://github.com/opensearch-project/OpenSearch/pull/13304))
-- Add upload flow for writing routing table to remote store ([#13870](https://github.com/opensearch-project/OpenSearch/pull/13870))
-- Add dynamic action retry timeout setting ([#14022](https://github.com/opensearch-project/OpenSearch/issues/14022))
-- [Remote Store] Add support to disable flush based on translog reader count ([#14027](https://github.com/opensearch-project/OpenSearch/pull/14027))
-- Add recovery chunk size setting ([#13997](https://github.com/opensearch-project/OpenSearch/pull/13997))
-- [Query Insights] Add exporter support for top n queries ([#12982](https://github.com/opensearch-project/OpenSearch/pull/12982))
-- [Query Insights] Add X-Opaque-Id to search request metadata for top n queries ([#13374](https://github.com/opensearch-project/OpenSearch/pull/13374))
-- [Streaming Indexing] Enhance RestAction with request / response streaming support ([#13772](https://github.com/opensearch-project/OpenSearch/pull/13772))
-- Add support for query level resource usage tracking ([#13172](https://github.com/opensearch-project/OpenSearch/pull/13172))
-- Move Remote Store Migration from DocRep to GA and modify remote migration settings name ([#14100](https://github.com/opensearch-project/OpenSearch/pull/14100))
-- Derived field object type support ([#13720](https://github.com/opensearch-project/OpenSearch/pull/13720))
-- [Query Insights] Add cpu and memory metrics to top n queries ([#13739](https://github.com/opensearch-project/OpenSearch/pull/13739))
+- Add fingerprint ingest processor ([#13724](https://github.com/opensearch-project/OpenSearch/pull/13724))

 ### Dependencies
-- Bump `com.github.spullara.mustache.java:compiler` from 0.9.10 to 0.9.13 ([#13329](https://github.com/opensearch-project/OpenSearch/pull/13329), [#13559](https://github.com/opensearch-project/OpenSearch/pull/13559))
 - Bump `org.gradle.test-retry` from 1.5.8 to 1.5.9 ([#13442](https://github.com/opensearch-project/OpenSearch/pull/13442))
-- Bump `org.apache.commons:commons-text` from 1.11.0 to 1.12.0 ([#13557](https://github.com/opensearch-project/OpenSearch/pull/13557))
-- Bump `org.hdrhistogram:HdrHistogram` from 2.1.12 to 2.2.2 ([#13556](https://github.com/opensearch-project/OpenSearch/pull/13556), [#13986](https://github.com/opensearch-project/OpenSearch/pull/13986))
-- Bump `com.gradle.enterprise` from 3.17.2 to 3.17.4 ([#13641](https://github.com/opensearch-project/OpenSearch/pull/13641), [#13753](https://github.com/opensearch-project/OpenSearch/pull/13753))
-- Bump `org.apache.hadoop:hadoop-minicluster` from 3.3.6 to 3.4.0 ([#13642](https://github.com/opensearch-project/OpenSearch/pull/13642))
-- Bump `mockito` from 5.11.0 to 5.12.0 ([#13665](https://github.com/opensearch-project/OpenSearch/pull/13665))
-- Bump `com.google.code.gson:gson` from 2.10.1 to 2.11.0 ([#13752](https://github.com/opensearch-project/OpenSearch/pull/13752))
-- Bump `ch.qos.logback:logback-core` from 1.5.3 to 1.5.6 ([#13756](https://github.com/opensearch-project/OpenSearch/pull/13756))
-- Bump `netty` from 4.1.109.Final to 4.1.110.Final ([#13802](https://github.com/opensearch-project/OpenSearch/pull/13802))
-- Bump `jackson` from 2.17.0 to 2.17.1 ([#13817](https://github.com/opensearch-project/OpenSearch/pull/13817))
-- Bump `reactor` from 3.5.15 to 3.5.17 ([#13825](https://github.com/opensearch-project/OpenSearch/pull/13825))
-- Bump `reactor-netty` from 1.1.17 to 1.1.19 ([#13825](https://github.com/opensearch-project/OpenSearch/pull/13825))
-- Bump `commons-cli:commons-cli` from 1.7.0 to 1.8.0 ([#13840](https://github.com/opensearch-project/OpenSearch/pull/13840))
-- Bump `org.apache.xmlbeans:xmlbeans` from 5.2.0 to 5.2.1 ([#13839](https://github.com/opensearch-project/OpenSearch/pull/13839))
-- Bump `actions/checkout` from 3 to 4 ([#13935](https://github.com/opensearch-project/OpenSearch/pull/13935))
-- Bump `com.netflix.nebula.ospackage-base` from 11.9.0 to 11.9.1 ([#13933](https://github.com/opensearch-project/OpenSearch/pull/13933))
 - Update to Apache Lucene 9.11.0 ([#14042](https://github.com/opensearch-project/OpenSearch/pull/14042))
-- Bump `com.azure:azure-core-http-netty` from 1.12.8 to 1.15.1 ([#14128](https://github.com/opensearch-project/OpenSearch/pull/14128))
-- Bump `tim-actions/get-pr-commits` from 1.1.0 to 1.3.1 ([#14126](https://github.com/opensearch-project/OpenSearch/pull/14126))
+- Bump `netty` from 4.1.110.Final to 4.1.111.Final ([#14356](https://github.com/opensearch-project/OpenSearch/pull/14356))
+- Bump `org.wiremock:wiremock-standalone` from 3.3.1 to 3.6.0 ([#14361](https://github.com/opensearch-project/OpenSearch/pull/14361))
+- Bump `reactor` from 3.5.17 to 3.5.18 ([#14395](https://github.com/opensearch-project/OpenSearch/pull/14395))
+- Bump `reactor-netty` from 1.1.19 to 1.1.20 ([#14395](https://github.com/opensearch-project/OpenSearch/pull/14395))
+- Bump `commons-net:commons-net` from 3.10.0 to 3.11.1 ([#14396](https://github.com/opensearch-project/OpenSearch/pull/14396))
+- Bump `org.apache.commons:commons-configuration2` from 2.10.1 to 2.11.0 ([#14399](https://github.com/opensearch-project/OpenSearch/pull/14399))
+- Bump `com.gradle.develocity` from 3.17.4 to 3.17.5 ([#14397](https://github.com/opensearch-project/OpenSearch/pull/14397))

 ### Changed
-- Add ability for Boolean and date field queries to run when only doc_values are enabled ([#11650](https://github.com/opensearch-project/OpenSearch/pull/11650))
-- Refactor implementations of query phase searcher, allow QueryCollectorContext to have zero collectors ([#13481](https://github.com/opensearch-project/OpenSearch/pull/13481))
-- Adds support to inject telemetry instances to plugins ([#13636](https://github.com/opensearch-project/OpenSearch/pull/13636))
-- Adds support to provide tags with value in Gauge metric. ([#13994](https://github.com/opensearch-project/OpenSearch/pull/13994))
-- Move cache removal notifications outside lru lock ([#14017](https://github.com/opensearch-project/OpenSearch/pull/14017))

 ### Deprecated

 ### Removed
-- Remove handling of index.mapper.dynamic in AutoCreateIndex([#13067](https://github.com/opensearch-project/OpenSearch/pull/13067))

 ### Fixed
-- Fix get field mapping API returns 404 error in mixed cluster with multiple versions ([#13624](https://github.com/opensearch-project/OpenSearch/pull/13624))
-- Allow clearing `remote_store.compatibility_mode` setting ([#13646](https://github.com/opensearch-project/OpenSearch/pull/13646))
-- Fix ReplicaShardBatchAllocator to batch shards without duplicates ([#13710](https://github.com/opensearch-project/OpenSearch/pull/13710))
-- Don't return negative scores from `multi_match` query with `cross_fields` type ([#13829](https://github.com/opensearch-project/OpenSearch/pull/13829))
-- Painless: ensure type "UnmodifiableMap" for params ([#13885](https://github.com/opensearch-project/OpenSearch/pull/13885))
-- Pass parent filter to inner hit query ([#13903](https://github.com/opensearch-project/OpenSearch/pull/13903))
-- Fix NPE on restore searchable snapshot ([#13911](https://github.com/opensearch-project/OpenSearch/pull/13911))
-- Fix double invocation of postCollection when MultiBucketCollector is present ([#14015](https://github.com/opensearch-project/OpenSearch/pull/14015))

 ### Security

-[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.13...2.x
+[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.15...2.x
diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle
index c68cc0406d3a6..b984ef3800490 100644
--- a/buildSrc/build.gradle
+++ b/buildSrc/build.gradle
@@ -128,7 +128,7 @@ dependencies {
   testFixturesApi "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${props.getProperty('randomizedrunner')}"
   testFixturesApi gradleApi()
   testFixturesApi gradleTestKit()
-  testImplementation 'org.wiremock:wiremock-standalone:3.3.1'
+  testImplementation 'org.wiremock:wiremock-standalone:3.6.0'
   testImplementation "org.mockito:mockito-core:${props.getProperty('mockito')}"
   integTestImplementation('org.spockframework:spock-core:2.3-groovy-3.0') {
     exclude module: "groovy"
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/OpenSearchTestBasePlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/OpenSearchTestBasePlugin.java
index 2ea8c2d015ecc..d0cb2da9c1dd3 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/OpenSearchTestBasePlugin.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/OpenSearchTestBasePlugin.java
@@ -110,7 +110,7 @@ public void execute(Task t) {
                 if (BuildParams.getRuntimeJavaVersion() == JavaVersion.VERSION_1_8) {
                     test.systemProperty("java.locale.providers", "SPI,JRE");
                 } else {
-                    test.systemProperty("java.locale.providers", "SPI,COMPAT");
+                    test.systemProperty("java.locale.providers", "SPI,CLDR");
                     if (test.getJavaVersion().compareTo(JavaVersion.VERSION_17) < 0) {
                         test.jvmArgs("--illegal-access=warn");
                     }
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/info/GlobalBuildInfoPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/info/GlobalBuildInfoPlugin.java
index 448ba8a96ef02..570ab4a9f70e1 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/info/GlobalBuildInfoPlugin.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/info/GlobalBuildInfoPlugin.java
@@ -199,7 +199,28 @@ private JavaVersion determineJavaVersion(String description, File javaHome, Java
     }

     private JvmInstallationMetadata getJavaInstallation(File javaHome) {
-        final InstallationLocation location = new InstallationLocation(javaHome, "Java home");
+        InstallationLocation location = null;
+
+        try {
+            try {
+                // The InstallationLocation(File, String) constructor is used by Gradle versions before 8.8
+                location = (InstallationLocation) MethodHandles.publicLookup()
+                    .findConstructor(InstallationLocation.class, MethodType.methodType(void.class, File.class, String.class))
+                    .invokeExact(javaHome, "Java home");
+            } catch (Throwable ex) {
+                // The InstallationLocation::userDefined factory method is used by Gradle 8.8 and later
+                location = (InstallationLocation) MethodHandles.publicLookup()
+                    .findStatic(
+                        InstallationLocation.class,
+                        "userDefined",
+                        MethodType.methodType(InstallationLocation.class, File.class, String.class)
+                    )
+                    .invokeExact(javaHome, "Java home");
+
+            }
+        } catch (Throwable ex) {
+            throw new IllegalStateException("Unable to find suitable InstallationLocation constructor / factory method", ex);
+        }

         try {
             try {
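The getJavaInstallation() change above probes two API shapes through java.lang.invoke so the same build source runs on Gradle versions both before and after InstallationLocation switched to a static factory. A minimal, self-contained sketch of that probe-then-fallback pattern, using a hypothetical Widget class rather than Gradle's types:

import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;

// Hypothetical target type: older releases expose a constructor, newer ones a static factory.
final class Widget {
    final String name;
    Widget(String name) { this.name = name; }
    static Widget of(String name) { return new Widget(name); }
}

final class CompatFactory {
    static Widget newWidget(String name) {
        try {
            try {
                // Probe the old shape first: Widget(String)
                return (Widget) MethodHandles.lookup()
                    .findConstructor(Widget.class, MethodType.methodType(void.class, String.class))
                    .invokeExact(name);
            } catch (Throwable probeFailed) {
                // Fall back to the new shape: static Widget of(String)
                return (Widget) MethodHandles.lookup()
                    .findStatic(Widget.class, "of", MethodType.methodType(Widget.class, String.class))
                    .invokeExact(name);
            }
        } catch (Throwable ex) {
            // Mirrors the plugin's behavior: fail loudly if neither API shape exists
            throw new IllegalStateException("No suitable Widget constructor / factory found", ex);
        }
    }
}

Unlike Class.forName-style reflection, the resolved MethodHandle is invoked with near-native call performance, which is why this pattern suits hot build-configuration paths.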
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionArchiveSetupPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionArchiveSetupPlugin.java
index 7ab91448252f2..a7f720855951a 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionArchiveSetupPlugin.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionArchiveSetupPlugin.java
@@ -148,8 +148,8 @@ private void configureGeneralTaskDefaults(Project project) {
         project.getTasks().withType(AbstractCopyTask.class).configureEach(t -> {
             t.dependsOn(project.getTasks().withType(EmptyDirTask.class));
             t.setIncludeEmptyDirs(true);
-            t.setDirMode(0755);
-            t.setFileMode(0644);
+            t.dirPermissions(perms -> perms.unix(0755));
+            t.filePermissions(perms -> perms.unix(0644));
         });

         // common config across all archives
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/DependencyLicensesPrecommitPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/DependencyLicensesPrecommitPlugin.java
index d4dcde9d63087..28a344de31ddb 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/DependencyLicensesPrecommitPlugin.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/DependencyLicensesPrecommitPlugin.java
@@ -33,11 +33,14 @@
 package org.opensearch.gradle.precommit;

 import org.opensearch.gradle.dependencies.CompileOnlyResolvePlugin;
+import org.opensearch.gradle.util.GradleUtils;
 import org.gradle.api.Project;
 import org.gradle.api.Task;
 import org.gradle.api.artifacts.Configuration;
 import org.gradle.api.artifacts.ProjectDependency;
+import org.gradle.api.file.FileCollection;
 import org.gradle.api.plugins.JavaPlugin;
+import org.gradle.api.provider.Provider;
 import org.gradle.api.tasks.TaskProvider;

 public class DependencyLicensesPrecommitPlugin extends PrecommitPlugin {
@@ -48,15 +51,16 @@ public TaskProvider<? extends Task> createTask(Project project) {
         TaskProvider<DependencyLicensesTask> dependencyLicenses = project.getTasks()
             .register("dependencyLicenses", DependencyLicensesTask.class);

+        final Configuration runtimeClasspath = project.getConfigurations().getByName(JavaPlugin.RUNTIME_CLASSPATH_CONFIGURATION_NAME);
+        final Configuration compileOnly = project.getConfigurations()
+            .getByName(CompileOnlyResolvePlugin.RESOLVEABLE_COMPILE_ONLY_CONFIGURATION_NAME);
+        final Provider<FileCollection> provider = project.provider(
+            () -> GradleUtils.getFiles(project, runtimeClasspath, dependency -> dependency instanceof ProjectDependency == false)
+                .minus(compileOnly)
+        );
+
         // only require dependency licenses for non-opensearch deps
-        dependencyLicenses.configure(t -> {
-            Configuration runtimeClasspath = project.getConfigurations().getByName(JavaPlugin.RUNTIME_CLASSPATH_CONFIGURATION_NAME);
-            Configuration compileOnly = project.getConfigurations()
-                .getByName(CompileOnlyResolvePlugin.RESOLVEABLE_COMPILE_ONLY_CONFIGURATION_NAME);
-            t.setDependencies(
-                runtimeClasspath.fileCollection(dependency -> dependency instanceof ProjectDependency == false).minus(compileOnly)
-            );
-        });
+        dependencyLicenses.configure(t -> t.getDependencies().set(provider));

         // we also create the updateShas helper task that is associated with dependencyLicenses
         project.getTasks().register("updateShas", UpdateShasTask.class, t -> t.setParentTask(dependencyLicenses));
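The rework above wraps the file-collection computation in project.provider(...), so the runtime classpath is only resolved when the task property is actually read (normally at execution time) instead of eagerly during configuration. A minimal sketch of that laziness, with hypothetical plugin and task names:

import org.gradle.api.Plugin;
import org.gradle.api.Project;
import org.gradle.api.provider.Provider;

// Illustrative only: LazyValuePlugin and countJars are not part of the OpenSearch build.
public class LazyValuePlugin implements Plugin<Project> {
    @Override
    public void apply(Project project) {
        Provider<Integer> jarCount = project.provider(
            // not evaluated here; evaluated when something calls jarCount.get()
            () -> project.getConfigurations().getByName("runtimeClasspath").getFiles().size()
        );
        project.getTasks().register("countJars", task ->
            // resolution happens inside the task action, after configuration is done
            task.doLast(t -> System.out.println("runtime jars: " + jarCount.get()))
        );
    }
}

Deferring resolution this way avoids resolving configurations at configuration time, which Gradle increasingly flags as a correctness problem (e.g. with the configuration cache).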
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/DependencyLicensesTask.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/DependencyLicensesTask.java
index e801681c5c386..7248e0bc14431 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/DependencyLicensesTask.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/DependencyLicensesTask.java
@@ -39,6 +39,7 @@
 import org.gradle.api.file.FileCollection;
 import org.gradle.api.logging.Logger;
 import org.gradle.api.logging.Logging;
+import org.gradle.api.provider.Property;
 import org.gradle.api.tasks.Input;
 import org.gradle.api.tasks.InputDirectory;
 import org.gradle.api.tasks.InputFiles;
@@ -121,7 +122,7 @@ public class DependencyLicensesTask extends DefaultTask {
     /**
      * A collection of jar files that should be checked.
      */
-    private FileCollection dependencies;
+    private Property<FileCollection> dependenciesProvider;

     /**
      * The directory to find the license and sha files in.
@@ -158,12 +159,11 @@ public void mapping(Map<String, String> props) {
     }

     @InputFiles
-    public FileCollection getDependencies() {
-        return dependencies;
-    }
-
-    public void setDependencies(FileCollection dependencies) {
-        this.dependencies = dependencies;
+    public Property<FileCollection> getDependencies() {
+        if (dependenciesProvider == null) {
+            dependenciesProvider = getProject().getObjects().property(FileCollection.class);
+        }
+        return dependenciesProvider;
     }

     @Optional
@@ -190,6 +190,11 @@ public void ignoreSha(String dep) {

     @TaskAction
     public void checkDependencies() throws IOException, NoSuchAlgorithmException {
+        if (dependenciesProvider == null) {
+            throw new GradleException("No dependencies variable defined.");
+        }
+
+        final FileCollection dependencies = dependenciesProvider.get();
         if (dependencies == null) {
             throw new GradleException("No dependencies variable defined.");
         }
@@ -226,7 +231,7 @@ public void checkDependencies() throws IOException, NoSuchAlgorithmException {
             }
         }

-        checkDependencies(licenses, notices, sources, shaFiles);
+        checkDependencies(dependencies, licenses, notices, sources, shaFiles);

         licenses.forEach((item, exists) -> failIfAnyMissing(item, exists, "license"));

@@ -255,6 +260,7 @@ private void failIfAnyMissing(String item, Boolean exists, String type) {
     }

     private void checkDependencies(
+        FileCollection dependencies,
         Map<String, Boolean> licenses,
         Map<String, Boolean> notices,
         Map<String, Boolean> sources,
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditTask.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditTask.java
index efcd01f163089..f7bb708933803 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditTask.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditTask.java
@@ -37,6 +37,7 @@
 import org.opensearch.gradle.LoggedExec;
 import org.opensearch.gradle.OS;
 import org.opensearch.gradle.dependencies.CompileOnlyResolvePlugin;
+import org.opensearch.gradle.util.GradleUtils;
 import org.gradle.api.DefaultTask;
 import org.gradle.api.JavaVersion;
 import org.gradle.api.artifacts.Configuration;
@@ -203,11 +204,13 @@ public Set<File> getJarsToScan() {
         // or dependencies added as `files(...)`, we can't be sure if those are third party or not.
         // err on the side of scanning these to make sure we don't miss anything
         Spec<Dependency> reallyThirdParty = dep -> dep.getGroup() != null && dep.getGroup().startsWith("org.opensearch") == false;
-        Set<File> jars = getRuntimeConfiguration().getResolvedConfiguration().getFiles(reallyThirdParty);
-        Set<File> compileOnlyConfiguration = getProject().getConfigurations()
-            .getByName(CompileOnlyResolvePlugin.RESOLVEABLE_COMPILE_ONLY_CONFIGURATION_NAME)
-            .getResolvedConfiguration()
-            .getFiles(reallyThirdParty);
+
+        Set<File> jars = GradleUtils.getFiles(getProject(), getRuntimeConfiguration(), reallyThirdParty).getFiles();
+        Set<File> compileOnlyConfiguration = GradleUtils.getFiles(
+            getProject(),
+            getProject().getConfigurations().getByName(CompileOnlyResolvePlugin.RESOLVEABLE_COMPILE_ONLY_CONFIGURATION_NAME),
+            reallyThirdParty
+        ).getFiles();
         // don't scan provided dependencies that we already scanned, e.x. don't scan cores dependencies for every plugin
         if (compileOnlyConfiguration != null) {
             jars.removeAll(compileOnlyConfiguration);
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/UpdateShasTask.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/UpdateShasTask.java
index 3fe08888afb09..de479f3b560b6 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/UpdateShasTask.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/UpdateShasTask.java
@@ -66,7 +66,7 @@ public UpdateShasTask() {
     public void updateShas() throws NoSuchAlgorithmException, IOException {
         Set<File> shaFiles = parentTask.get().getShaFiles();

-        for (File dependency : parentTask.get().getDependencies()) {
+        for (File dependency : parentTask.get().getDependencies().get()) {
             String jarName = dependency.getName();
             File shaFile = parentTask.get().getShaFile(jarName);
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/tar/SymbolicLinkPreservingTar.java b/buildSrc/src/main/java/org/opensearch/gradle/tar/SymbolicLinkPreservingTar.java
index e82d8ed73ced2..3352dda98ef66 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/tar/SymbolicLinkPreservingTar.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/tar/SymbolicLinkPreservingTar.java
@@ -184,7 +184,7 @@ private void visitSymbolicLink(final FileCopyDetailsInternal details) {
         visitedSymbolicLinks.add(details.getFile());
         final TarArchiveEntry entry = new TarArchiveEntry(details.getRelativePath().getPathString(), TarConstants.LF_SYMLINK);
         entry.setModTime(getModTime(details));
-        entry.setMode(UnixStat.LINK_FLAG | details.getMode());
+        entry.setMode(UnixStat.LINK_FLAG | details.getPermissions().toUnixNumeric());
         try {
             entry.setLinkName(Files.readSymbolicLink(details.getFile().toPath()).toString());
             tar.putArchiveEntry(entry);
@@ -197,7 +197,7 @@ private void visitDirectory(final FileCopyDetailsInternal details) {
         final TarArchiveEntry entry = new TarArchiveEntry(details.getRelativePath().getPathString() + "/");
         entry.setModTime(getModTime(details));
-        entry.setMode(UnixStat.DIR_FLAG | details.getMode());
+        entry.setMode(UnixStat.DIR_FLAG | details.getPermissions().toUnixNumeric());
         try {
             tar.putArchiveEntry(entry);
             tar.closeArchiveEntry();
@@ -209,7 +209,7 @@ private void visitFile(final FileCopyDetailsInternal details) {
         final TarArchiveEntry entry = new TarArchiveEntry(details.getRelativePath().getPathString());
         entry.setModTime(getModTime(details));
-        entry.setMode(UnixStat.FILE_FLAG | details.getMode());
+        entry.setMode(UnixStat.FILE_FLAG | details.getPermissions().toUnixNumeric());
         entry.setSize(details.getSize());
         try {
             tar.putArchiveEntry(entry);
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/util/GradleUtils.java b/buildSrc/src/main/java/org/opensearch/gradle/util/GradleUtils.java
index 031fee2d1127f..428b4a16748e1 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/util/GradleUtils.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/util/GradleUtils.java
@@ -39,12 +39,17 @@
 import org.gradle.api.UnknownTaskException;
 import org.gradle.api.artifacts.Configuration;
 import org.gradle.api.artifacts.Dependency;
+import org.gradle.api.artifacts.LenientConfiguration;
+import org.gradle.api.file.FileCollection;
+import org.gradle.api.internal.artifacts.ivyservice.ResolvedFilesCollectingVisitor;
+import org.gradle.api.internal.artifacts.ivyservice.resolveengine.artifact.SelectedArtifactSet;
 import org.gradle.api.plugins.JavaBasePlugin;
 import org.gradle.api.plugins.JavaPluginExtension;
 import org.gradle.api.provider.Provider;
 import org.gradle.api.services.BuildService;
 import org.gradle.api.services.BuildServiceRegistration;
 import org.gradle.api.services.BuildServiceRegistry;
+import org.gradle.api.specs.Spec;
 import org.gradle.api.tasks.SourceSet;
 import org.gradle.api.tasks.SourceSetContainer;
 import org.gradle.api.tasks.TaskContainer;
@@ -53,6 +58,9 @@
 import org.gradle.plugins.ide.eclipse.model.EclipseModel;
 import org.gradle.plugins.ide.idea.model.IdeaModel;

+import java.lang.invoke.MethodHandle;
+import java.lang.invoke.MethodHandles;
+import java.lang.invoke.MethodType;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -245,4 +253,22 @@ public static String getProjectPathFromTask(String taskPath) {
         int lastDelimiterIndex = taskPath.lastIndexOf(":");
         return lastDelimiterIndex == 0 ? ":" : taskPath.substring(0, lastDelimiterIndex);
     }
+
+    public static FileCollection getFiles(Project project, Configuration cfg, Spec<? super Dependency> spec) {
+        final LenientConfiguration configuration = cfg.getResolvedConfiguration().getLenientConfiguration();
+        try {
+            // Using reflection here to cover Gradle releases before 8.8: the
+            // ResolverResults.LegacyResolverResults.LegacyVisitedArtifactSet::select(...) method
+            // is not available on those older versions.
+            final MethodHandle mh = MethodHandles.lookup()
+                .findVirtual(configuration.getClass(), "select", MethodType.methodType(SelectedArtifactSet.class, Spec.class))
+                .bindTo(configuration);
+
+            final ResolvedFilesCollectingVisitor visitor = new ResolvedFilesCollectingVisitor();
+            ((SelectedArtifactSet) mh.invoke(spec)).visitArtifacts(visitor, false);
+            return project.files(visitor.getFiles());
+        } catch (Throwable ex) {
+            return project.files(configuration.getFiles(spec));
+        }
+    }
 }
diff --git a/buildSrc/src/test/java/org/opensearch/gradle/precommit/DependencyLicensesTaskTests.java b/buildSrc/src/test/java/org/opensearch/gradle/precommit/DependencyLicensesTaskTests.java
index bb216b27128e1..28513710470af 100644
--- a/buildSrc/src/test/java/org/opensearch/gradle/precommit/DependencyLicensesTaskTests.java
+++ b/buildSrc/src/test/java/org/opensearch/gradle/precommit/DependencyLicensesTaskTests.java
@@ -344,7 +344,7 @@ private TaskProvider<DependencyLicensesTask> createDependencyLicensesTask(Projec
             .register("dependencyLicenses", DependencyLicensesTask.class, new Action<DependencyLicensesTask>() {
                 @Override
                 public void execute(DependencyLicensesTask dependencyLicensesTask) {
-                    dependencyLicensesTask.setDependencies(getDependencies(project));
+                    dependencyLicensesTask.getDependencies().set(getDependencies(project));

                     final Map<String, String> mappings = new HashMap<>();
                     mappings.put("from", "groovy-.*");
diff --git a/buildSrc/src/test/java/org/opensearch/gradle/precommit/UpdateShasTaskTests.java b/buildSrc/src/test/java/org/opensearch/gradle/precommit/UpdateShasTaskTests.java
index 2deabb752017a..15d6d6cd4c31c 100644
--- a/buildSrc/src/test/java/org/opensearch/gradle/precommit/UpdateShasTaskTests.java
+++ b/buildSrc/src/test/java/org/opensearch/gradle/precommit/UpdateShasTaskTests.java
@@ -102,7 +102,7 @@ public void whenDependencyExistsButShaNotThenShouldCreateNewShaFile() throws IOE
     public void whenDependencyAndWrongShaExistsThenShouldNotOverwriteShaFile() throws IOException, NoSuchAlgorithmException {
         project.getDependencies().add("someCompileConfiguration", dependency);

-        File groovyJar = task.getParentTask().getDependencies().getFiles().iterator().next();
+        File groovyJar = task.getParentTask().getDependencies().get().getFiles().iterator().next();
         String groovyShaName = groovyJar.getName() + ".sha1";

         File groovySha = createFileIn(getLicensesDir(project), groovyShaName, "content");
@@ -162,7 +162,7 @@ private TaskProvider<DependencyLicensesTask> createDependencyLicensesTask(Projec
             .register("dependencyLicenses", DependencyLicensesTask.class, new Action<DependencyLicensesTask>() {
                 @Override
                 public void execute(DependencyLicensesTask dependencyLicensesTask) {
-                    dependencyLicensesTask.setDependencies(getDependencies(project));
+                    dependencyLicensesTask.getDependencies().set(getDependencies(project));
                 }
             });
diff --git a/buildSrc/version.properties b/buildSrc/version.properties
index c34409053b915..eb96261b056e3 100644
--- a/buildSrc/version.properties
+++ b/buildSrc/version.properties
@@ -29,12 +29,12 @@ hdrhistogram = 2.2.2
 # when updating the JNA version, also update the version in buildSrc/build.gradle
 jna = 5.13.0

-netty = 4.1.110.Final
+netty = 4.1.111.Final
 joda = 2.12.7

 # project reactor
-reactor_netty = 1.1.19
-reactor = 3.5.17
+reactor_netty = 1.1.20
+reactor = 3.5.18

 # client dependencies
 httpclient5 = 5.2.1
diff --git a/distribution/archives/build.gradle b/distribution/archives/build.gradle
index 161b8008525b4..792b1ab57ddbc 100644
--- a/distribution/archives/build.gradle
+++ b/distribution/archives/build.gradle
@@ -39,11 +39,17 @@ CopySpec archiveFiles(CopySpec modulesFiles, String distributionType, String pla
     with libFiles()
   }
   into('config') {
-    dirMode 0750
-    fileMode 0660
+    dirPermissions {
+      unix 0750
+    }
+    filePermissions {
+      unix 0660
+    }
     with configFiles(distributionType, java)
     from {
-      dirMode 0750
+      dirPermissions {
+        unix 0750
+      }
       jvmOptionsDir.getParent()
     }
   }
@@ -61,13 +67,17 @@ CopySpec archiveFiles(CopySpec modulesFiles, String distributionType, String pla
   }
   into('') {
     from {
-      dirMode 0755
+      dirPermissions {
+        unix 0755
+      }
       logsDir.getParent()
     }
   }
   into('') {
     from {
-      dirMode 0755
+      dirPermissions {
+        unix 0755
+      }
       pluginsDir.getParent()
     }
   }
diff --git a/distribution/build.gradle b/distribution/build.gradle
index 35ca84ca66dba..36efe2e0d45e8 100644
--- a/distribution/build.gradle
+++ b/distribution/build.gradle
@@ -363,9 +363,9 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) {
         if (it.relativePath.segments[-2] == 'bin' || ((platform == 'darwin-x64' || platform == 'darwin-arm64') && it.relativePath.segments[-2] == 'MacOS')) {
           // bin files, wherever they are within modules (eg platform specific) should be executable
          // and MacOS is an alternative to bin on macOS
-          it.mode = 0755
+          it.permissions(perm -> perm.unix(0755))
        } else {
-          it.mode = 0644
+          it.permissions(perm -> perm.unix(0644))
        }
      }
      def buildModules = buildModulesTaskProvider
@@ -413,7 +413,7 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) {
        from '../src/bin'
        exclude '*.exe'
        exclude '*.bat'
-        eachFile { it.setMode(0755) }
+        eachFile { it.permissions(perm -> perm.unix(0755)) }
        MavenFilteringHack.filter(it, expansionsForDistribution(distributionType, java))
      }
      // windows files, only for zip
@@ -431,7 +431,7 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) {
      }
      // module provided bin files
      with copySpec {
-        eachFile { it.setMode(0755) }
+        eachFile { it.permissions(perm -> perm.unix(0755)) }
        from project(':distribution').buildBin
        if (distributionType != 'zip') {
          exclude '*.bat'
@@ -473,7 +473,7 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) {
      }
      eachFile { FileCopyDetails details ->
        if (details.relativePath.segments[-2] == 'bin' || details.relativePath.segments[-1] == 'jspawnhelper') {
-          details.mode = 0755
+          details.permissions(perm -> perm.unix(0755))
        }
        if (details.name == 'src.zip') {
          details.exclude()
@@ -501,7 +501,7 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) {
      }
      eachFile { FileCopyDetails details ->
        if (details.relativePath.segments[-2] == 'bin' || details.relativePath.segments[-1] == 'jspawnhelper') {
-          details.mode = 0755
+          details.permissions(perm -> perm.unix(0755))
        }
      }
    }
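The permissions rewrites in these distribution scripts (and in the package script below) all follow one mapping away from the integer-mode setters that Gradle deprecated. A hedged, compilable summary of that mapping (class and helper names are illustrative, not from the PR):

import org.gradle.api.file.CopySpec;
import org.gradle.api.file.FileCopyDetails;

// Old API                        ->  New permissions API (used throughout this PR):
//   spec.setFileMode(0644)       ->  spec.filePermissions(p -> p.unix(0644))
//   spec.setDirMode(0755)        ->  spec.dirPermissions(p -> p.unix(0755))
//   details.getMode()            ->  details.getPermissions().toUnixNumeric()
final class PermissionsMigration {
    static void configure(CopySpec spec) {
        spec.filePermissions(p -> p.unix(0644));
        spec.dirPermissions(p -> p.unix(0755));
        spec.eachFile((FileCopyDetails details) -> {
            // hypothetical rule: shell scripts stay executable, everything else is 0644
            boolean executable = details.getRelativePath().getLastName().endsWith(".sh");
            details.permissions(p -> p.unix(executable ? 0755 : 0644));
        });
    }
}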
diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle
index 211b3bd55da60..621620eef9d71 100644
--- a/distribution/packages/build.gradle
+++ b/distribution/packages/build.gradle
@@ -160,7 +160,9 @@ Closure commonPackageConfig(String type, boolean jdk, String architecture) {
      }
      from(rootProject.projectDir) {
        include 'README.md'
-        fileMode 0644
+        filePermissions {
+          unix 0644
+        }
      }
      into('lib') {
        with libFiles()
@@ -183,9 +185,9 @@ Closure commonPackageConfig(String type, boolean jdk, String architecture) {
            directory('/' + segments[0..i].join('/'), 0755)
          }
          if (segments[-2] == 'bin' || segments[-1] == 'jspawnhelper') {
-            fcp.mode = 0755
+            fcp.permissions(perm -> perm.unix(0755))
          } else {
-            fcp.mode = 0644
+            fcp.permissions(perm -> perm.unix(0644))
          }
        }
      }
@@ -195,7 +197,9 @@ Closure commonPackageConfig(String type, boolean jdk, String architecture) {
      if (type == 'deb') {
        into("/usr/share/doc/${packageName}") {
          from "${packagingFiles}/copyright"
-          fileMode 0644
+          filePermissions {
+            unix 0644
+          }
        }
      } else {
        assert type == 'rpm'
@@ -204,7 +208,9 @@ Closure commonPackageConfig(String type, boolean jdk, String architecture) {
          include 'APACHE-LICENSE-2.0.txt'
          rename { 'LICENSE.txt' }
        }
-        fileMode 0644
+        filePermissions {
+          unix 0644
+        }
      }
    }

@@ -213,7 +219,9 @@ Closure commonPackageConfig(String type, boolean jdk, String architecture) {
    configurationFile '/etc/opensearch/jvm.options'
    configurationFile '/etc/opensearch/log4j2.properties'
    from("${packagingFiles}") {
-      dirMode 0750
+      dirPermissions {
+        unix 0750
+      }
      into('/etc')
      permissionGroup 'opensearch'
      includeEmptyDirs true
@@ -223,8 +231,12 @@ Closure commonPackageConfig(String type, boolean jdk, String architecture) {
    }
    from("${packagingFiles}/etc/opensearch") {
      into('/etc/opensearch')
-      dirMode 0750
-      fileMode 0660
+      dirPermissions {
+        unix 0750
+      }
+      filePermissions {
+        unix 0660
+      }
      permissionGroup 'opensearch'
      includeEmptyDirs true
      createDirectoryEntry true
@@ -235,34 +247,46 @@ Closure commonPackageConfig(String type, boolean jdk, String architecture) {
    into(new File(envFile).getParent()) {
      fileType CONFIG | NOREPLACE
      permissionGroup 'opensearch'
-      fileMode 0660
+      filePermissions {
+        unix 0660
+      }
      from "${packagingFiles}/env/opensearch"
    }

    // ========= systemd =========
    into('/usr/lib/tmpfiles.d') {
      from "${packagingFiles}/systemd/opensearch.conf"
-      fileMode 0644
+      filePermissions {
+        unix 0644
+      }
    }
    into('/usr/lib/systemd/system') {
      fileType CONFIG | NOREPLACE
      from "${packagingFiles}/systemd/opensearch.service"
-      fileMode 0644
+      filePermissions {
+        unix 0644
+      }
    }
    into('/usr/lib/sysctl.d') {
      fileType CONFIG | NOREPLACE
      from "${packagingFiles}/systemd/sysctl/opensearch.conf"
-      fileMode 0644
+      filePermissions {
+        unix 0644
+      }
    }
    into('/usr/share/opensearch/bin') {
      from "${packagingFiles}/systemd/systemd-entrypoint"
-      fileMode 0755
+      filePermissions {
+        unix 0755
+      }
    }

    // ========= sysV init =========
    configurationFile '/etc/init.d/opensearch'
    into('/etc/init.d') {
-      fileMode 0750
+      filePermissions {
+        unix 0750
+      }
      fileType CONFIG | NOREPLACE
      from "${packagingFiles}/init.d/opensearch"
    }
@@ -278,7 +302,9 @@ Closure commonPackageConfig(String type, boolean jdk, String architecture) {
        createDirectoryEntry true
        user u
        permissionGroup g
-        dirMode mode
+        dirPermissions {
+          unix mode
+        }
      }
    }
    copyEmptyDir('/var/log/opensearch', 'opensearch', 'opensearch', 0750)
@@ -341,7 +367,9 @@ Closure commonDebConfig(boolean jdk, String architecture) {

    into('/usr/share/lintian/overrides') {
      from('src/deb/lintian/opensearch')
-      fileMode 0644
+      filePermissions {
+        unix 0644
+      }
    }
  }
}
diff --git a/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/SystemJvmOptions.java b/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/SystemJvmOptions.java
index 726c381db09f6..af7138569972a 100644
--- a/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/SystemJvmOptions.java
+++ b/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/SystemJvmOptions.java
@@ -105,13 +105,8 @@ private static String javaLocaleProviders() {
           SPI setting is used to allow loading custom CalendarDataProvider
           in jdk8 it has to be loaded from jre/lib/ext,
           in jdk9+ it is already within ES project and on a classpath
-
-          Due to internationalization enhancements in JDK 9 OpenSearch need to set the provider to COMPAT otherwise time/date
-          parsing will break in an incompatible way for some date patterns and locales.
-          //TODO COMPAT will be deprecated in at some point, see please https://bugs.openjdk.java.net/browse/JDK-8232906
-          See also: documentation in server/org.opensearch.common.time.IsoCalendarDataProvider
        */
-        return "-Djava.locale.providers=SPI,COMPAT";
+        return "-Djava.locale.providers=SPI,CLDR";
    }
}
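CLDR and the legacy COMPAT data can disagree on locale details such as week rules and localized names, so a small probe run once under each provider setting makes the behavioral change above visible. This is an illustrative standalone program, not part of the build; run it with -Djava.locale.providers=SPI,COMPAT and again with SPI,CLDR (on a JDK that still ships COMPAT) and diff the output:

import java.time.DayOfWeek;
import java.time.LocalDate;
import java.time.format.DateTimeFormatter;
import java.time.temporal.WeekFields;
import java.util.Locale;

// Prints locale-sensitive values that commonly diverge between COMPAT and CLDR.
public class LocaleProviderProbe {
    public static void main(String[] args) {
        System.out.println("providers = " + System.getProperty("java.locale.providers"));
        for (Locale locale : new Locale[] { Locale.ROOT, Locale.US, Locale.GERMANY }) {
            WeekFields wf = WeekFields.of(locale);
            DayOfWeek firstDay = wf.getFirstDayOfWeek();
            String sample = LocalDate.of(2024, 6, 17)
                .format(DateTimeFormatter.ofPattern("EEEE dd MMMM yyyy", locale));
            System.out.println(locale + ": firstDayOfWeek=" + firstDay + ", sample=" + sample);
        }
    }
}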
diff --git a/gradle/ide.gradle b/gradle/ide.gradle
index ea353f8d92bdd..e266d9add172d 100644
--- a/gradle/ide.gradle
+++ b/gradle/ide.gradle
@@ -81,7 +81,7 @@ if (System.getProperty('idea.active') == 'true') {
    }
    runConfigurations {
      defaults(JUnit) {
-        vmParameters = '-ea -Djava.locale.providers=SPI,COMPAT'
+        vmParameters = '-ea -Djava.locale.providers=SPI,CLDR'
        if (BuildParams.runtimeJavaVersion > JavaVersion.VERSION_17) {
          vmParameters += ' -Djava.security.manager=allow'
        }
diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties
index 9b0d73222260e..f5bdef81deb70 100644
--- a/gradle/wrapper/gradle-wrapper.properties
+++ b/gradle/wrapper/gradle-wrapper.properties
@@ -11,7 +11,7 @@
 distributionBase=GRADLE_USER_HOME
 distributionPath=wrapper/dists
-distributionUrl=https\://services.gradle.org/distributions/gradle-8.7-all.zip
+distributionUrl=https\://services.gradle.org/distributions/gradle-8.8-all.zip
 zipStoreBase=GRADLE_USER_HOME
 zipStorePath=wrapper/dists
-distributionSha256Sum=194717442575a6f96e1c1befa2c30e9a4fc90f701d7aee33eb879b79e7ff05c0
+distributionSha256Sum=f8b4f4772d302c8ff580bc40d0f56e715de69b163546944f787c87abf209c961
diff --git a/gradlew.bat b/gradlew.bat
index 6689b85beecde..7101f8e4676fc 100644
--- a/gradlew.bat
+++ b/gradlew.bat
@@ -43,11 +43,11 @@ set JAVA_EXE=java.exe
 %JAVA_EXE% -version >NUL 2>&1
 if %ERRORLEVEL% equ 0 goto execute

-echo.
-echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
-echo.
-echo Please set the JAVA_HOME variable in your environment to match the
-echo location of your Java installation.
+echo. 1>&2
+echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 1>&2
+echo. 1>&2
+echo Please set the JAVA_HOME variable in your environment to match the 1>&2
+echo location of your Java installation. 1>&2

 goto fail

@@ -57,11 +57,11 @@ set JAVA_EXE=%JAVA_HOME%/bin/java.exe

 if exist "%JAVA_EXE%" goto execute

-echo.
-echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
-echo.
-echo Please set the JAVA_HOME variable in your environment to match the
-echo location of your Java installation.
+echo. 1>&2
+echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% 1>&2
+echo. 1>&2
+echo Please set the JAVA_HOME variable in your environment to match the 1>&2
+echo location of your Java installation. 1>&2

 goto fail
diff --git a/libs/core/src/main/java/org/opensearch/Version.java b/libs/core/src/main/java/org/opensearch/Version.java
index 3f680b4ab8e05..d99dae2a5e64b 100644
--- a/libs/core/src/main/java/org/opensearch/Version.java
+++ b/libs/core/src/main/java/org/opensearch/Version.java
@@ -105,6 +105,7 @@ public class Version implements Comparable<Version>, ToXContentFragment {
     public static final Version V_2_14_0 = new Version(2140099, org.apache.lucene.util.Version.LUCENE_9_10_0);
     public static final Version V_2_14_1 = new Version(2140199, org.apache.lucene.util.Version.LUCENE_9_10_0);
     public static final Version V_2_15_0 = new Version(2150099, org.apache.lucene.util.Version.LUCENE_9_10_0);
+    public static final Version V_2_16_0 = new Version(2160099, org.apache.lucene.util.Version.LUCENE_9_11_0);
     public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_11_0);
     public static final Version CURRENT = V_3_0_0;
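The numeric ids in these constants appear to pack major/minor/revision/build into decimal fields, so 2160099 reads as 2.16.0 with build 99. A standalone sketch of that reading, inferred from the constants above rather than taken from the real Version class:

// Illustrative decoder for the id scheme the Version constants suggest.
public class VersionIdProbe {
    public static void main(String[] args) {
        int id = 2160099; // V_2_16_0
        int major = id / 1_000_000;
        int minor = (id / 10_000) % 100;
        int revision = (id / 100) % 100;
        int build = id % 100;
        System.out.printf("%d -> %d.%d.%d (build %d)%n", id, major, minor, revision, build);
        // prints: 2160099 -> 2.16.0 (build 99)
    }
}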
"hello")); + // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache + ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge(indexName).setFlush(true).get(); ensureSearchable(indexName); } diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/FingerprintProcessor.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/FingerprintProcessor.java new file mode 100644 index 0000000000000..c2f59bf586c81 --- /dev/null +++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/FingerprintProcessor.java @@ -0,0 +1,279 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ingest.common; + +import org.opensearch.common.Nullable; +import org.opensearch.common.hash.MessageDigests; +import org.opensearch.core.common.Strings; +import org.opensearch.ingest.AbstractProcessor; +import org.opensearch.ingest.ConfigurationUtils; +import org.opensearch.ingest.IngestDocument; +import org.opensearch.ingest.Processor; + +import java.nio.charset.StandardCharsets; +import java.security.MessageDigest; +import java.util.Base64; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + +import static org.opensearch.ingest.ConfigurationUtils.newConfigurationException; + +/** + * Processor that generating hash value for the specified fields or fields not in the specified excluded list + */ +public final class FingerprintProcessor extends AbstractProcessor { + public static final String TYPE = "fingerprint"; + // this processor is introduced in 2.16.0, we append the OpenSearch version to the hash method name to ensure + // that this processor always generates same hash value based on a specific hash method, if the processing logic + // of this processor changes in future version, the version number in the hash method should be increased correspondingly. 
+    private static final Set<String> HASH_METHODS = Set.of("MD5@2.16.0", "SHA-1@2.16.0", "SHA-256@2.16.0", "SHA3-256@2.16.0");
+
+    // fields used to generate the hash value
+    private final List<String> fields;
+    // all fields other than the excluded fields are used to generate the hash value
+    private final List<String> excludeFields;
+    // the target field to store the hash value, defaults to fingerprint
+    private final String targetField;
+    // hash method used to generate the hash value, defaults to SHA-1@2.16.0
+    private final String hashMethod;
+    private final boolean ignoreMissing;
+
+    FingerprintProcessor(
+        String tag,
+        String description,
+        @Nullable List<String> fields,
+        @Nullable List<String> excludeFields,
+        String targetField,
+        String hashMethod,
+        boolean ignoreMissing
+    ) {
+        super(tag, description);
+        if (fields != null && !fields.isEmpty()) {
+            if (fields.stream().anyMatch(Strings::isNullOrEmpty)) {
+                throw new IllegalArgumentException("field name in [fields] cannot be null nor empty");
+            }
+            if (excludeFields != null && !excludeFields.isEmpty()) {
+                throw new IllegalArgumentException("either fields or exclude_fields can be set");
+            }
+        }
+        if (excludeFields != null && !excludeFields.isEmpty() && excludeFields.stream().anyMatch(Strings::isNullOrEmpty)) {
+            throw new IllegalArgumentException("field name in [exclude_fields] cannot be null nor empty");
+        }
+
+        if (!HASH_METHODS.contains(hashMethod.toUpperCase(Locale.ROOT))) {
+            throw new IllegalArgumentException("hash method must be MD5@2.16.0, SHA-1@2.16.0, SHA-256@2.16.0 or SHA3-256@2.16.0");
+        }
+        this.fields = fields;
+        this.excludeFields = excludeFields;
+        this.targetField = targetField;
+        this.hashMethod = hashMethod;
+        this.ignoreMissing = ignoreMissing;
+    }
+
+    public List<String> getFields() {
+        return fields;
+    }
+
+    public List<String> getExcludeFields() {
+        return excludeFields;
+    }
+
+    public String getTargetField() {
+        return targetField;
+    }
+
+    public String getHashMethod() {
+        return hashMethod;
+    }
+
+    public boolean isIgnoreMissing() {
+        return ignoreMissing;
+    }
+
+    @Override
+    public IngestDocument execute(IngestDocument document) {
+        // deduplicate and sort the field names to make sure we get a consistent hash value
+        final List<String> sortedFields;
+        Set<String> existingFields = new HashSet<>(document.getSourceAndMetadata().keySet());
+        Set<String> metadataFields = document.getMetadata()
+            .keySet()
+            .stream()
+            .map(IngestDocument.Metadata::getFieldName)
+            .collect(Collectors.toSet());
+        // metadata fields such as _index, _id and _routing are ignored
+        if (fields != null && !fields.isEmpty()) {
+            sortedFields = fields.stream()
+                .distinct()
+                .filter(field -> !metadataFields.contains(field))
+                .sorted()
+                .collect(Collectors.toList());
+        } else if (excludeFields != null && !excludeFields.isEmpty()) {
+            sortedFields = existingFields.stream()
+                .filter(field -> !metadataFields.contains(field) && !excludeFields.contains(field))
+                .sorted()
+                .collect(Collectors.toList());
+        } else {
+            sortedFields = existingFields.stream().filter(field -> !metadataFields.contains(field)).sorted().collect(Collectors.toList());
+        }
+        assert (!sortedFields.isEmpty());
+
+        final StringBuilder concatenatedFields = new StringBuilder();
+        sortedFields.forEach(field -> {
+            if (!document.hasField(field)) {
+                if (ignoreMissing) {
+                    return;
+                } else {
+                    throw new IllegalArgumentException("field [" + field + "] doesn't exist");
+                }
+            }
+
+            final Object value = document.getFieldValue(field, Object.class);
+            if (value instanceof Map) {
+                @SuppressWarnings("unchecked")
+                Map<String, Object> flattenedMap = toFlattenedMap((Map<String, Object>) value);
+                flattenedMap.entrySet().stream().sorted(Map.Entry.comparingByKey()).forEach(entry -> {
+                    String fieldValue = String.valueOf(entry.getValue());
+                    concatenatedFields.append("|")
+                        .append(field)
+                        .append(".")
+                        .append(entry.getKey())
+                        .append("|")
+                        .append(fieldValue.length())
+                        .append(":")
+                        .append(fieldValue);
+                });
+            } else {
+                String fieldValue = String.valueOf(value);
+                concatenatedFields.append("|").append(field).append("|").append(fieldValue.length()).append(":").append(fieldValue);
+            }
+        });
+        // if all specified fields don't exist and ignore_missing is true, then do nothing
+        if (concatenatedFields.length() == 0) {
+            return document;
+        }
+        concatenatedFields.append("|");
+
+        MessageDigest messageDigest = HashMethod.fromMethodName(hashMethod);
+        assert (messageDigest != null);
+        messageDigest.update(concatenatedFields.toString().getBytes(StandardCharsets.UTF_8));
+        document.setFieldValue(targetField, hashMethod + ":" + Base64.getEncoder().encodeToString(messageDigest.digest()));
+
+        return document;
+    }
+
+    @Override
+    public String getType() {
+        return TYPE;
+    }
+
+    /**
+     * Convert a map containing nested fields to a flattened map,
+     * for example, if the original map is
+     * {
+     *   "a": {
+     *     "b": 1,
+     *     "c": 2
+     *   }
+     * }, then the converted map is
+     * {
+     *   "a.b": 1,
+     *   "a.c": 2
+     * }
+     * @param map the original map which may contain nested fields
+     * @return a flattened map which has only one level of fields
+     */
+    @SuppressWarnings("unchecked")
+    private Map<String, Object> toFlattenedMap(Map<String, Object> map) {
+        Map<String, Object> flattenedMap = new HashMap<>();
+        for (Map.Entry<String, Object> entry : map.entrySet()) {
+            if (entry.getValue() instanceof Map) {
+                toFlattenedMap((Map<String, Object>) entry.getValue()).forEach(
+                    (key, value) -> flattenedMap.put(entry.getKey() + "." + key, value)
+                );
+            } else {
+                flattenedMap.put(entry.getKey(), entry.getValue());
+            }
+        }
+        return flattenedMap;
+    }
+
+    /**
+     * The supported hash methods used to generate hash values
+     */
+    enum HashMethod {
+        MD5(MessageDigests.md5()),
+        SHA1(MessageDigests.sha1()),
+        SHA256(MessageDigests.sha256()),
+        SHA3256(MessageDigests.sha3256());
+
+        private final MessageDigest messageDigest;
+
+        HashMethod(MessageDigest messageDigest) {
+            this.messageDigest = messageDigest;
+        }
+
+        public static MessageDigest fromMethodName(String methodName) {
+            String name = methodName.toUpperCase(Locale.ROOT);
+            switch (name) {
+                case "MD5@2.16.0":
+                    return MD5.messageDigest;
+                case "SHA-1@2.16.0":
+                    return SHA1.messageDigest;
+                case "SHA-256@2.16.0":
+                    return SHA256.messageDigest;
+                case "SHA3-256@2.16.0":
+                    return SHA3256.messageDigest;
+                default:
+                    return null;
+            }
+        }
+    }
+
+    public static final class Factory implements Processor.Factory {
+        @Override
+        public FingerprintProcessor create(
+            Map<String, Processor.Factory> registry,
+            String processorTag,
+            String description,
+            Map<String, Object> config
+        ) throws Exception {
+            List<String> fields = ConfigurationUtils.readOptionalList(TYPE, processorTag, config, "fields");
+            List<String> excludeFields = ConfigurationUtils.readOptionalList(TYPE, processorTag, config, "exclude_fields");
+            if (fields != null && !fields.isEmpty()) {
+                if (fields.stream().anyMatch(Strings::isNullOrEmpty)) {
+                    throw newConfigurationException(TYPE, processorTag, "fields", "field name cannot be null nor empty");
+                }
+                if (excludeFields != null && !excludeFields.isEmpty()) {
+                    throw newConfigurationException(TYPE, processorTag, "fields", "either fields or exclude_fields can be set");
+                }
+            }
+            if (excludeFields != null && !excludeFields.isEmpty() && excludeFields.stream().anyMatch(Strings::isNullOrEmpty)) {
throw newConfigurationException(TYPE, processorTag, "exclude_fields", "field name cannot be null nor empty"); + } + + String targetField = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "target_field", "fingerprint"); + String hashMethod = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "hash_method", "SHA-1@2.16.0"); + if (!HASH_METHODS.contains(hashMethod.toUpperCase(Locale.ROOT))) { + throw newConfigurationException( + TYPE, + processorTag, + "hash_method", + "hash method must be MD5@2.16.0, SHA-1@2.16.0, SHA-256@2.16.0 or SHA3-256@2.16.0" + ); + } + boolean ignoreMissing = ConfigurationUtils.readBooleanProperty(TYPE, processorTag, config, "ignore_missing", false); + return new FingerprintProcessor(processorTag, description, fields, excludeFields, targetField, hashMethod, ignoreMissing); + } + } +} diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/IngestCommonModulePlugin.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/IngestCommonModulePlugin.java index 0f8b248fd5af8..162934efa6778 100644 --- a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/IngestCommonModulePlugin.java +++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/IngestCommonModulePlugin.java @@ -109,6 +109,7 @@ public Map getProcessors(Processor.Parameters paramet processors.put(CopyProcessor.TYPE, new CopyProcessor.Factory(parameters.scriptService)); processors.put(RemoveByPatternProcessor.TYPE, new RemoveByPatternProcessor.Factory()); processors.put(CommunityIdProcessor.TYPE, new CommunityIdProcessor.Factory()); + processors.put(FingerprintProcessor.TYPE, new FingerprintProcessor.Factory()); return Collections.unmodifiableMap(processors); } diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/FingerprintProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/FingerprintProcessorFactoryTests.java new file mode 100644 index 0000000000000..74ad4cade7b37 --- /dev/null +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/FingerprintProcessorFactoryTests.java @@ -0,0 +1,119 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/IngestCommonModulePlugin.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/IngestCommonModulePlugin.java
index 0f8b248fd5af8..162934efa6778 100644
--- a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/IngestCommonModulePlugin.java
+++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/IngestCommonModulePlugin.java
@@ -109,6 +109,7 @@ public Map<String, Processor.Factory> getProcessors(Processor.Parameters paramet
         processors.put(CopyProcessor.TYPE, new CopyProcessor.Factory(parameters.scriptService));
         processors.put(RemoveByPatternProcessor.TYPE, new RemoveByPatternProcessor.Factory());
         processors.put(CommunityIdProcessor.TYPE, new CommunityIdProcessor.Factory());
+        processors.put(FingerprintProcessor.TYPE, new FingerprintProcessor.Factory());
         return Collections.unmodifiableMap(processors);
     }
diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/FingerprintProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/FingerprintProcessorFactoryTests.java
new file mode 100644
index 0000000000000..74ad4cade7b37
--- /dev/null
+++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/FingerprintProcessorFactoryTests.java
@@ -0,0 +1,119 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.ingest.common;
+
+import org.opensearch.OpenSearchParseException;
+import org.opensearch.test.OpenSearchTestCase;
+import org.junit.Before;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+
+import static org.hamcrest.CoreMatchers.equalTo;
+
+public class FingerprintProcessorFactoryTests extends OpenSearchTestCase {
+
+    private FingerprintProcessor.Factory factory;
+
+    @Before
+    public void init() {
+        factory = new FingerprintProcessor.Factory();
+    }
+
+    public void testCreate() throws Exception {
+        Map<String, Object> config = new HashMap<>();
+
+        List<String> fields = null;
+        List<String> excludeFields = null;
+        if (randomBoolean()) {
+            fields = List.of(randomAlphaOfLength(10));
+            config.put("fields", fields);
+        } else {
+            excludeFields = List.of(randomAlphaOfLength(10));
+            config.put("exclude_fields", excludeFields);
+        }
+
+        String targetField = null;
+        if (randomBoolean()) {
+            targetField = randomAlphaOfLength(10);
+        }
+        config.put("target_field", targetField);
+
+        boolean ignoreMissing = randomBoolean();
+        config.put("ignore_missing", ignoreMissing);
+        String processorTag = randomAlphaOfLength(10);
+        FingerprintProcessor fingerprintProcessor = factory.create(null, processorTag, null, config);
+        assertThat(fingerprintProcessor.getTag(), equalTo(processorTag));
+        assertThat(fingerprintProcessor.getFields(), equalTo(fields));
+        assertThat(fingerprintProcessor.getExcludeFields(), equalTo(excludeFields));
+        assertThat(fingerprintProcessor.getTargetField(), equalTo(Objects.requireNonNullElse(targetField, "fingerprint")));
+        assertThat(fingerprintProcessor.isIgnoreMissing(), equalTo(ignoreMissing));
+    }
+
+    public void testCreateWithFields() throws Exception {
+        Map<String, Object> config = new HashMap<>();
+        config.put("fields", List.of(randomAlphaOfLength(10)));
+        config.put("exclude_fields", List.of(randomAlphaOfLength(10)));
+        try {
+            factory.create(null, null, null, config);
+            fail("factory create should have failed");
+        } catch (OpenSearchParseException e) {
+            assertThat(e.getMessage(), equalTo("[fields] either fields or exclude_fields can be set"));
+        }
+
+        config = new HashMap<>();
+        List<String> fields = new ArrayList<>();
+        if (randomBoolean()) {
+            fields.add(null);
+        } else {
+            fields.add("");
+        }
+        config.put("fields", fields);
+        try {
+            factory.create(null, null, null, config);
+            fail("factory create should have failed");
+        } catch (OpenSearchParseException e) {
+            assertThat(e.getMessage(), equalTo("[fields] field name cannot be null nor empty"));
+        }
+
+        config = new HashMap<>();
+        List<String> excludeFields = new ArrayList<>();
+        if (randomBoolean()) {
+            excludeFields.add(null);
+        } else {
+            excludeFields.add("");
+        }
+        config.put("exclude_fields", excludeFields);
+        try {
+            factory.create(null, null, null, config);
+            fail("factory create should have failed");
+        } catch (OpenSearchParseException e) {
+            assertThat(e.getMessage(), equalTo("[exclude_fields] field name cannot be null nor empty"));
+        }
+    }
+
+    public void testCreateWithHashMethod() throws Exception {
+        Map<String, Object> config = new HashMap<>();
+        List<String> fields = List.of(randomAlphaOfLength(10));
+        config.put("fields", fields);
+        config.put("hash_method", randomAlphaOfLength(10));
+        try {
+            factory.create(null, null, null, config);
+            fail("factory create should have failed");
+        } catch (OpenSearchParseException e) {
+            assertThat(
+                e.getMessage(),
+                equalTo("[hash_method] hash method must be MD5@2.16.0, SHA-1@2.16.0, SHA-256@2.16.0 or SHA3-256@2.16.0")
+            );
+        }
+    }
+}
diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/FingerprintProcessorTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/FingerprintProcessorTests.java new file mode 100644 index 0000000000000..67a82f28fb763 --- /dev/null +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/FingerprintProcessorTests.java @@ -0,0 +1,176 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ingest.common; + +import org.opensearch.ingest.IngestDocument; +import org.opensearch.ingest.Processor; +import org.opensearch.ingest.RandomDocumentPicks; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.stream.Collectors; + +import static org.hamcrest.Matchers.equalTo; + +public class FingerprintProcessorTests extends OpenSearchTestCase { + private final List<String> hashMethods = List.of("MD5@2.16.0", "SHA-1@2.16.0", "SHA-256@2.16.0", "SHA3-256@2.16.0"); + + public void testGenerateFingerprint() throws Exception { + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); + List<String> fields = null; + List<String> excludeFields = null; + if (randomBoolean()) { + fields = new ArrayList<>(); + for (int i = 0; i < randomIntBetween(1, 10); i++) { + fields.add(RandomDocumentPicks.addRandomField(random(), ingestDocument, randomAlphaOfLength(10))); + } + } else { + excludeFields = new ArrayList<>(); + for (int i = 0; i < randomIntBetween(1, 10); i++) { + excludeFields.add(RandomDocumentPicks.addRandomField(random(), ingestDocument, randomAlphaOfLength(10))); + } + } + + String targetField = "fingerprint"; + if (randomBoolean()) { + targetField = randomAlphaOfLength(10); + } + + String hashMethod = randomFrom(hashMethods); + Processor processor = createFingerprintProcessor(fields, excludeFields, targetField, hashMethod, false); + processor.execute(ingestDocument); + assertThat(ingestDocument.hasField(targetField), equalTo(true)); + } + + public void testCreateFingerprintProcessorFailed() { + List<String> fields = new ArrayList<>(); + if (randomBoolean()) { + fields.add(null); + } else { + fields.add(""); + } + fields.add(randomAlphaOfLength(10)); + + assertThrows( + "field name in [fields] cannot be null nor empty", + IllegalArgumentException.class, + () -> createFingerprintProcessor(fields, null, null, randomFrom(hashMethods), false) + ); + + List<String> excludeFields = new ArrayList<>(); + if (randomBoolean()) { + excludeFields.add(null); + } else { + excludeFields.add(""); + } + excludeFields.add(randomAlphaOfLength(10)); + + assertThrows( + "field name in [exclude_fields] cannot be null nor empty", + IllegalArgumentException.class, + () -> createFingerprintProcessor(null, excludeFields, null, randomFrom(hashMethods), false) + ); + + assertThrows( + "either fields or exclude_fields can be set", + IllegalArgumentException.class, + () -> createFingerprintProcessor( + List.of(randomAlphaOfLength(10)), + List.of(randomAlphaOfLength(10)), + null, + randomFrom(hashMethods), + false + ) + ); + + assertThrows( + "hash method must be MD5@2.16.0, SHA-1@2.16.0, SHA-256@2.16.0 or SHA3-256@2.16.0", + IllegalArgumentException.class, + () -> createFingerprintProcessor(Collections.emptyList(), null, "fingerprint", randomAlphaOfLength(10), false) + ); + } + + public void testEmptyFieldAndExcludeFields() 
throws Exception { + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); + List<String> fields = null; + List<String> excludeFields = null; + if (randomBoolean()) { + fields = new ArrayList<>(); + } else { + excludeFields = new ArrayList<>(); + } + String targetField = "fingerprint"; + if (randomBoolean()) { + targetField = randomAlphaOfLength(10); + } + + String hashMethod = randomFrom(hashMethods); + Processor processor = createFingerprintProcessor(fields, excludeFields, targetField, hashMethod, false); + processor.execute(ingestDocument); + assertThat(ingestDocument.hasField(targetField), equalTo(true)); + } + + public void testIgnoreMissing() throws Exception { + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); + String nonExistingFieldName = RandomDocumentPicks.randomNonExistingFieldName(random(), ingestDocument); + List<String> nonExistingFields = List.of(nonExistingFieldName); + Processor processor = createFingerprintProcessor(nonExistingFields, null, "fingerprint", randomFrom(hashMethods), false); + assertThrows( + "field [" + nonExistingFieldName + "] doesn't exist", + IllegalArgumentException.class, + () -> processor.execute(ingestDocument) + ); + + String targetField = "fingerprint"; + Processor processorWithIgnoreMissing = createFingerprintProcessor( + nonExistingFields, + null, + "fingerprint", + randomFrom(hashMethods), + true + ); + processorWithIgnoreMissing.execute(ingestDocument); + assertThat(ingestDocument.hasField(targetField), equalTo(false)); + } + + public void testIgnoreMetadataFields() throws Exception { + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); + List<String> metadataFields = ingestDocument.getMetadata() + .keySet() + .stream() + .map(IngestDocument.Metadata::getFieldName) + .collect(Collectors.toList()); + + String existingFieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, randomAlphaOfLength(10)); + List<String> fields = List.of(existingFieldName, metadataFields.get(randomIntBetween(0, metadataFields.size() - 1))); + + String targetField = "fingerprint"; + String algorithm = randomFrom(hashMethods); + Processor processor = createFingerprintProcessor(fields, null, targetField, algorithm, false); + + processor.execute(ingestDocument); + String fingerprint = ingestDocument.getFieldValue(targetField, String.class); + + processor = createFingerprintProcessor(List.of(existingFieldName), null, targetField, algorithm, false); + processor.execute(ingestDocument); + assertThat(ingestDocument.getFieldValue(targetField, String.class), equalTo(fingerprint)); + } + + private FingerprintProcessor createFingerprintProcessor( + List<String> fields, + List<String> excludeFields, + String targetField, + String hashMethod, + boolean ignoreMissing + ) { + return new FingerprintProcessor(randomAlphaOfLength(10), null, fields, excludeFields, targetField, hashMethod, ignoreMissing); + } +}
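The YAML REST tests that follow assert literal fingerprint values such as "SHA-1@2.16.0:YqpBTuHXCPV04j/7lGfWeUl8Tyo=", i.e. the stored string has the shape <hash_method>:<base64(digest)>. Below is a rough sketch of that output shape, not part of the diff: it assumes the text before the "@" names the JDK MessageDigest algorithm, and it will not reproduce the test vectors byte-for-byte because the processor's serialization of field names and values is internal.

```java
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.util.Base64;

public final class FingerprintFormatSketch {
    // Builds "<hashMethod>:<base64(digest)>" over some canonical bytes.
    static String fingerprint(String hashMethod, byte[] canonicalBytes) throws Exception {
        // Assumption: "SHA-1@2.16.0" maps to JDK algorithm "SHA-1"; the
        // "@2.16.0" suffix only versions the method name in the stored value.
        String algorithm = hashMethod.split("@", 2)[0];
        byte[] digest = MessageDigest.getInstance(algorithm).digest(canonicalBytes);
        return hashMethod + ":" + Base64.getEncoder().encodeToString(digest);
    }

    public static void main(String[] args) throws Exception {
        byte[] bytes = "foo:foo".getBytes(StandardCharsets.UTF_8); // hypothetical canonical form
        System.out.println(fingerprint("SHA-1@2.16.0", bytes));    // SHA-1@2.16.0:<28-char base64>
    }
}
```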
diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/10_basic.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/10_basic.yml index 2a816f0386667..9bf4faf53a999 100644 --- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/10_basic.yml +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/10_basic.yml @@ -86,3 +86,19 @@ - do: nodes.info: {} - contains: { nodes.$cluster_manager.ingest.processors: { type: community_id } } + +--- +"Fingerprint processor exists": + - skip: + version: " - 2.15.99" + features: contains + reason: "fingerprint processor was introduced in 2.16.0 and the contains assertion is newly added" + - do: + cluster.state: {} + + # Get cluster-manager node id + - set: { cluster_manager_node: cluster_manager } + + - do: + nodes.info: {} + - contains: { nodes.$cluster_manager.ingest.processors: { type: fingerprint } } diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/340_fingerprint_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/340_fingerprint_processor.yml new file mode 100644 index 0000000000000..04568916239f4 --- /dev/null +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/340_fingerprint_processor.yml @@ -0,0 +1,786 @@ +--- +teardown: + - do: + ingest.delete_pipeline: + id: "1" + ignore: 404 + +--- +"Test create fingerprint processor": + - skip: + version: " - 2.15.99" + reason: "introduced in 2.16.0" + - do: + catch: /field name cannot be null nor empty/ + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "fingerprint" : { + "fields": [null] + } + } + ] + } + - do: + catch: /field name cannot be null nor empty/ + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "fingerprint" : { + "exclude_fields": [""] + } + } + ] + } + - do: + catch: /either fields or exclude\_fields can be set/ + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "fingerprint" : { + "fields": ["foo"], + "exclude_fields": ["bar"] + } + } + ] + } + + - do: + catch: /hash method must be MD5@2.16.0\, SHA\-1@2.16.0, SHA\-256@2.16.0 or SHA3\-256@2.16.0/ + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "fingerprint" : { + "fields": ["foo"], + "hash_method": "non-existing" + } + } + ] + } + + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "fingerprint" : { + "fields" : ["foo"], + "target_field" : "fingerprint_field", + "hash_method": "SHA-256@2.16.0" + } + } + ] + } + - match: { acknowledged: true } + + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "fingerprint" : { + } + } + ] + } + - match: { acknowledged: true } + +--- +"Test fingerprint processor with ignore_missing": + - skip: + version: " - 2.15.99" + reason: "introduced in 2.16.0" + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "fingerprint" : { + "fields" : ["foo"] + } + } + ] + } + - match: { acknowledged: true } + + - do: + catch: /field \[foo\] doesn't exist/ + index: + index: test + id: 1 + pipeline: "1" + body: { + bar: "bar" + } + + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "fingerprint" : { + "fields" : ["foo", "bar"], + "ignore_missing" : true + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + foo: "foo" + } + - do: + get: + index: test + id: 1 + - match: { _source.fingerprint: "SHA-1@2.16.0:YqpBTuHXCPV04j/7lGfWeUl8Tyo=" } + +--- +"Test fingerprint processor with custom target field": + - skip: + version: " - 2.15.99" + reason: "introduced in 2.16.0" + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "fingerprint" : { + "fields" : ["foo"], + "target_field" : "target" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + foo: "foo" + } + - do: + get: + index: test + id: 1 + - match: { _source.target: "SHA-1@2.16.0:YqpBTuHXCPV04j/7lGfWeUl8Tyo=" } + +--- +"Test fingerprint 
processor with non-primitive fields and SHA-1": + - skip: + version: " - 2.15.99" + reason: "introduced in 2.16.0" + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "fingerprint" : { + "fields" : ["foo", "bar", "zoo"] + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + foo: [1, 2, 3], + bar: { + field: { + innerField: "inner" + } + }, + zoo: null + } + - do: + get: + index: test + id: 1 + - match: { _source.fingerprint: "SHA-1@2.16.0:KYJ4pc4ouFmAbgZGp7CfNoykZeo=" } + + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "fingerprint" : { + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 2 + pipeline: "1" + body: { + foo: [1, 2, 3], + bar: { + field: { + innerField: "inner" + } + }, + zoo: null + } + - do: + get: + index: test + id: 2 + - match: { _source.fingerprint: "SHA-1@2.16.0:KYJ4pc4ouFmAbgZGp7CfNoykZeo=" } + + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "fingerprint" : { + "fields":[] + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 3 + pipeline: "1" + body: { + foo: [1, 2, 3], + bar: { + field: { + innerField: "inner" + } + }, + zoo: null + } + - do: + get: + index: test + id: 3 + - match: { _source.fingerprint: "SHA-1@2.16.0:KYJ4pc4ouFmAbgZGp7CfNoykZeo=" } + + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "fingerprint" : { + "exclude_fields":[] + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 4 + pipeline: "1" + body: { + foo: [1, 2, 3], + bar: { + field: { + innerField: "inner" + } + }, + zoo: null + } + - do: + get: + index: test + id: 4 + - match: { _source.fingerprint: "SHA-1@2.16.0:KYJ4pc4ouFmAbgZGp7CfNoykZeo=" } + +--- +"Test fingerprint processor with non-primitive fields and MD5": + - skip: + version: " - 2.15.99" + reason: "introduced in 2.16.0" + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "fingerprint" : { + "fields" : ["foo", "bar", "zoo"], + "hash_method" : "MD5@2.16.0" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + foo: [1, 2, 3], + bar: { + field: { + innerField: "inner" + } + }, + zoo: null + } + - do: + get: + index: test + id: 1 + - match: { _source.fingerprint: "MD5@2.16.0:NovpcJ+MYHzEZtCewcDPTQ==" } + + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "fingerprint" : { + "hash_method" : "MD5@2.16.0" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 2 + pipeline: "1" + body: { + foo: [1, 2, 3], + bar: { + field: { + innerField: "inner" + } + }, + zoo: null + } + - do: + get: + index: test + id: 2 + - match: { _source.fingerprint: "MD5@2.16.0:NovpcJ+MYHzEZtCewcDPTQ==" } + + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "fingerprint" : { + "fields":[], + "hash_method" : "MD5@2.16.0" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 3 + pipeline: "1" + body: { + foo: [1, 2, 3], + bar: { + field: { + innerField: "inner" + } + }, + zoo: null + } + - do: + get: + index: test + id: 3 + - match: { _source.fingerprint: "MD5@2.16.0:NovpcJ+MYHzEZtCewcDPTQ==" } + + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "fingerprint" : { + "exclude_fields":[], + "hash_method" : "MD5@2.16.0" + } + } + ] + } + - match: { 
acknowledged: true } + + - do: + index: + index: test + id: 4 + pipeline: "1" + body: { + foo: [1, 2, 3], + bar: { + field: { + innerField: "inner" + } + }, + zoo: null + } + - do: + get: + index: test + id: 4 + - match: { _source.fingerprint: "MD5@2.16.0:NovpcJ+MYHzEZtCewcDPTQ==" } + + +--- +"Test fingerprint processor with non-primitive fields and SHA-256": + - skip: + version: " - 2.15.99" + reason: "introduced in 2.16.0" + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "fingerprint" : { + "fields" : ["foo", "bar", "zoo"], + "hash_method" : "SHA-256@2.16.0" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + foo: [1, 2, 3], + bar: { + field: { + innerField: "inner" + } + }, + zoo: null + } + - do: + get: + index: test + id: 1 + - match: { _source.fingerprint: "SHA-256@2.16.0:Sdlg0BodM3n1my4BvaTfJCPrvHxfrxno0kCLfMaC+XY=" } + + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "fingerprint" : { + "hash_method" : "SHA-256@2.16.0" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 2 + pipeline: "1" + body: { + foo: [1, 2, 3], + bar: { + field: { + innerField: "inner" + } + }, + zoo: null + } + - do: + get: + index: test + id: 2 + - match: { _source.fingerprint: "SHA-256@2.16.0:Sdlg0BodM3n1my4BvaTfJCPrvHxfrxno0kCLfMaC+XY=" } + + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "fingerprint" : { + "fields":[], + "hash_method" : "SHA-256@2.16.0" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 3 + pipeline: "1" + body: { + foo: [1, 2, 3], + bar: { + field: { + innerField: "inner" + } + }, + zoo: null + } + - do: + get: + index: test + id: 3 + - match: { _source.fingerprint: "SHA-256@2.16.0:Sdlg0BodM3n1my4BvaTfJCPrvHxfrxno0kCLfMaC+XY=" } + + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "fingerprint" : { + "exclude_fields":[], + "hash_method" : "SHA-256@2.16.0" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 4 + pipeline: "1" + body: { + foo: [1, 2, 3], + bar: { + field: { + innerField: "inner" + } + }, + zoo: null + } + - do: + get: + index: test + id: 4 + - match: { _source.fingerprint: "SHA-256@2.16.0:Sdlg0BodM3n1my4BvaTfJCPrvHxfrxno0kCLfMaC+XY=" } + +--- +"Test fingerprint processor with non-primitive fields and SHA3-256": + - skip: + version: " - 2.15.99" + reason: "introduced in 2.16.0" + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "fingerprint" : { + "fields" : ["foo", "bar", "zoo"], + "hash_method" : "SHA3-256@2.16.0" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + foo: [1, 2, 3], + bar: { + field: { + innerField: "inner" + } + }, + zoo: null + } + - do: + get: + index: test + id: 1 + - match: { _source.fingerprint: "SHA3-256@2.16.0:+GZCkMLEMkUA/4IrEZEZZYsVMbZdpJ92ppN3wUsFYOI=" } + + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "fingerprint" : { + "hash_method" : "SHA3-256@2.16.0" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 2 + pipeline: "1" + body: { + foo: [1, 2, 3], + bar: { + field: { + innerField: "inner" + } + }, + zoo: null + } + - do: + get: + index: test + id: 2 + - match: { _source.fingerprint: "SHA3-256@2.16.0:+GZCkMLEMkUA/4IrEZEZZYsVMbZdpJ92ppN3wUsFYOI=" } + + - do: + ingest.put_pipeline: + id: "1" + 
body: > + { + "processors": [ + { + "fingerprint" : { + "fields":[], + "hash_method" : "SHA3-256@2.16.0" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 3 + pipeline: "1" + body: { + foo: [1, 2, 3], + bar: { + field: { + innerField: "inner" + } + }, + zoo: null + } + - do: + get: + index: test + id: 3 + - match: { _source.fingerprint: "SHA3-256@2.16.0:+GZCkMLEMkUA/4IrEZEZZYsVMbZdpJ92ppN3wUsFYOI=" } + + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "fingerprint" : { + "exclude_fields":[], + "hash_method" : "SHA3-256@2.16.0" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 4 + pipeline: "1" + body: { + foo: [1, 2, 3], + bar: { + field: { + innerField: "inner" + } + }, + zoo: null + } + - do: + get: + index: test + id: 4 + - match: { _source.fingerprint: "SHA3-256@2.16.0:+GZCkMLEMkUA/4IrEZEZZYsVMbZdpJ92ppN3wUsFYOI=" } diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.110.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.110.Final.jar.sha1 deleted file mode 100644 index faaf70c858a6e..0000000000000 --- a/modules/transport-netty4/licenses/netty-buffer-4.1.110.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3d918a9ee057d995c362902b54634fc307132aac \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.111.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.111.Final.jar.sha1 new file mode 100644 index 0000000000000..6784ac6c3b64f --- /dev/null +++ b/modules/transport-netty4/licenses/netty-buffer-4.1.111.Final.jar.sha1 @@ -0,0 +1 @@ +b54863f578939e135d3b3aea610284ae57c188cf \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.110.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.110.Final.jar.sha1 deleted file mode 100644 index 7affbc14fa93a..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-4.1.110.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f1fa43b03e93ab88e805b6a4e3e83780c80b47d2 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.111.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.111.Final.jar.sha1 new file mode 100644 index 0000000000000..3d86194de9213 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-4.1.111.Final.jar.sha1 @@ -0,0 +1 @@ +a6762ec00a6d268f9980741f5b755838bcd658bf \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.110.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.110.Final.jar.sha1 deleted file mode 100644 index 07730a5606ce2..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http-4.1.110.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9d05cd927209ea25bbf342962c00b8e5a828c2a4 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.111.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.111.Final.jar.sha1 new file mode 100644 index 0000000000000..4ef1adb818300 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http-4.1.111.Final.jar.sha1 @@ -0,0 +1 @@ +c6ecbc452321e632bf3cea0f9758839b650455c7 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.110.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.110.Final.jar.sha1 deleted file mode 100644 index ebd1e0d52efb2..0000000000000 --- 
a/modules/transport-netty4/licenses/netty-codec-http2-4.1.110.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e0849843eb5b1c036b12551baca98a9f7ff847a0 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 new file mode 100644 index 0000000000000..06c86b8fda557 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 @@ -0,0 +1 @@ +f0cca5df75bfb4f858d0435f601d8b1cae1de054 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.110.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.110.Final.jar.sha1 deleted file mode 100644 index 568c0aa2a2c03..0000000000000 --- a/modules/transport-netty4/licenses/netty-common-4.1.110.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ec361e7e025c029be50c55c8480080cabcbc01e7 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.111.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.111.Final.jar.sha1 new file mode 100644 index 0000000000000..16cb1cce7f504 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-common-4.1.111.Final.jar.sha1 @@ -0,0 +1 @@ +58210befcb31adbcadd5724966a061444db91863 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.110.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.110.Final.jar.sha1 deleted file mode 100644 index 2d6050dd1e3a5..0000000000000 --- a/modules/transport-netty4/licenses/netty-handler-4.1.110.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -168db749c22652ee7fed1ebf7ec46ce856d75e51 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.111.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.111.Final.jar.sha1 new file mode 100644 index 0000000000000..2f70f791f65ed --- /dev/null +++ b/modules/transport-netty4/licenses/netty-handler-4.1.111.Final.jar.sha1 @@ -0,0 +1 @@ +2bc6a58ad2e9e279634b6e55022e8dcd3c175cc4 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.110.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.110.Final.jar.sha1 deleted file mode 100644 index c3ee8087a8b5d..0000000000000 --- a/modules/transport-netty4/licenses/netty-resolver-4.1.110.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -66c15921104cda0159b34e316541bc765dfaf3c0 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.111.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.111.Final.jar.sha1 new file mode 100644 index 0000000000000..621cbf58f3133 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-resolver-4.1.111.Final.jar.sha1 @@ -0,0 +1 @@ +3493179999f211dc49714319f81da2be86523a3b \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.110.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.110.Final.jar.sha1 deleted file mode 100644 index 32c8fa2b876a2..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-4.1.110.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b91f04c39ac14d6a29d07184ef305953ee6e0348 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.111.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.111.Final.jar.sha1 new file mode 100644 index 0000000000000..ac96e7545ed58 --- /dev/null +++ 
b/modules/transport-netty4/licenses/netty-transport-4.1.111.Final.jar.sha1 @@ -0,0 +1 @@ +24e97cf14ea9d80afe4c5ab69066b587fccc154a \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1 deleted file mode 100644 index 2c468962b1b64..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a7096e7c0a25a983647909d7513f5d4943d589c0 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 new file mode 100644 index 0000000000000..0847ac3034db7 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 @@ -0,0 +1 @@ +acafc128cddafa021bc0b48b0788eb0e118add5e \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.110.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.110.Final.jar.sha1 deleted file mode 100644 index c4ca8f15e85c5..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-dns-4.1.110.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -381c5bf8b7570c163fa7893a26d02b7ac36ff6eb \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.111.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.111.Final.jar.sha1 new file mode 100644 index 0000000000000..5e3f819012811 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-dns-4.1.111.Final.jar.sha1 @@ -0,0 +1 @@ +f988dbb527efb0e7cf7d444cc50b0fc3f5f380ec \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.110.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.110.Final.jar.sha1 deleted file mode 100644 index ebd1e0d52efb2..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-http2-4.1.110.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e0849843eb5b1c036b12551baca98a9f7ff847a0 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 new file mode 100644 index 0000000000000..06c86b8fda557 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 @@ -0,0 +1 @@ +f0cca5df75bfb4f858d0435f601d8b1cae1de054 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.110.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.110.Final.jar.sha1 deleted file mode 100644 index 9f6e95ba38d2e..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-socks-4.1.110.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4d54c8d5b95b14756043efb59b8c3e62ec67aa43 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.111.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.111.Final.jar.sha1 new file mode 100644 index 0000000000000..226ee06d39d6c --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-socks-4.1.111.Final.jar.sha1 @@ -0,0 +1 @@ +ea52ef6617a9b69b0baaebb7f0b80373527f9607 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.110.Final.jar.sha1 
b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.110.Final.jar.sha1 deleted file mode 100644 index f31396d94c2ec..0000000000000 --- a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.110.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b7fb401dd47c79e6b99f2319ac3b561c50c31c30 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.111.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.111.Final.jar.sha1 new file mode 100644 index 0000000000000..dcc2b0c7ca923 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.111.Final.jar.sha1 @@ -0,0 +1 @@ +1e459c8630bb7c942b79a97e62dd728798de6a8c \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.110.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.110.Final.jar.sha1 deleted file mode 100644 index 18d122acd2c44..0000000000000 --- a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.110.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3e687cdc4ecdbbad07508a11b715bdf95fa20939 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.111.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.111.Final.jar.sha1 new file mode 100644 index 0000000000000..b22ad6784809b --- /dev/null +++ b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.111.Final.jar.sha1 @@ -0,0 +1 @@ +5ac6a3d96935129ba45ea768ad30e31cad0d8c4d \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1 deleted file mode 100644 index 2c468962b1b64..0000000000000 --- a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a7096e7c0a25a983647909d7513f5d4943d589c0 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 new file mode 100644 index 0000000000000..0847ac3034db7 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 @@ -0,0 +1 @@ +acafc128cddafa021bc0b48b0788eb0e118add5e \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.1.19.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.1.19.jar.sha1 deleted file mode 100644 index cbcbfcd87d682..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-core-1.1.19.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -639e2c63ade6f2a49d7e501ca2264b74d240b448 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.1.20.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.1.20.jar.sha1 new file mode 100644 index 0000000000000..2f4d023c88c80 --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-core-1.1.20.jar.sha1 @@ -0,0 +1 @@ +1a5ef52a470a82d9313e2e1ad8ba064bdbd38948 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.1.19.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.1.19.jar.sha1 deleted file mode 100644 index 1eeedfc0926f5..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-http-1.1.19.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-b4bbb1aeb64ecb2b3949c38983032a7f0b0ebd07 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.1.20.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.1.20.jar.sha1 new file mode 100644 index 0000000000000..6c031e00e39c1 --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-http-1.1.20.jar.sha1 @@ -0,0 +1 @@ +8d4ee98405a5856cf0c9d7c1a70f3f14631e3c46 \ No newline at end of file diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index eb50bd2d0615a..63eb783649884 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -74,7 +74,7 @@ dependencies { api "commons-codec:commons-codec:${versions.commonscodec}" api 'commons-collections:commons-collections:3.2.2' api "org.apache.commons:commons-compress:${versions.commonscompress}" - api 'org.apache.commons:commons-configuration2:2.10.1' + api 'org.apache.commons:commons-configuration2:2.11.0' api "commons-io:commons-io:${versions.commonsio}" api 'org.apache.commons:commons-lang3:3.14.0' implementation 'com.google.re2j:re2j:1.7' diff --git a/plugins/repository-hdfs/licenses/commons-configuration2-2.10.1.jar.sha1 b/plugins/repository-hdfs/licenses/commons-configuration2-2.10.1.jar.sha1 deleted file mode 100644 index d4c0f8417d357..0000000000000 --- a/plugins/repository-hdfs/licenses/commons-configuration2-2.10.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2b681b3bcddeaa5bf5c2a2939cd77e2f9ad6efda \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-configuration2-2.11.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-configuration2-2.11.0.jar.sha1 new file mode 100644 index 0000000000000..eea24804c5228 --- /dev/null +++ b/plugins/repository-hdfs/licenses/commons-configuration2-2.11.0.jar.sha1 @@ -0,0 +1 @@ +af5a2c6abe587074c0be1107fcb27fa2fad91304 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.110.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.110.Final.jar.sha1 deleted file mode 100644 index 8f8d86e6065b2..0000000000000 --- a/plugins/repository-hdfs/licenses/netty-all-4.1.110.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -db3f4d3ad3d16e26991a64d50b749ae09e0e0c8e \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.111.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.111.Final.jar.sha1 new file mode 100644 index 0000000000000..076124a7d1f89 --- /dev/null +++ b/plugins/repository-hdfs/licenses/netty-all-4.1.111.Final.jar.sha1 @@ -0,0 +1 @@ +8fba10bb4911517eb1bdcc05ef392499dda4d5ac \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.110.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.110.Final.jar.sha1 deleted file mode 100644 index faaf70c858a6e..0000000000000 --- a/plugins/repository-s3/licenses/netty-buffer-4.1.110.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3d918a9ee057d995c362902b54634fc307132aac \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.111.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.111.Final.jar.sha1 new file mode 100644 index 0000000000000..6784ac6c3b64f --- /dev/null +++ b/plugins/repository-s3/licenses/netty-buffer-4.1.111.Final.jar.sha1 @@ -0,0 +1 @@ +b54863f578939e135d3b3aea610284ae57c188cf \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.110.Final.jar.sha1 
b/plugins/repository-s3/licenses/netty-codec-4.1.110.Final.jar.sha1 deleted file mode 100644 index 7affbc14fa93a..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-4.1.110.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f1fa43b03e93ab88e805b6a4e3e83780c80b47d2 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.111.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.111.Final.jar.sha1 new file mode 100644 index 0000000000000..3d86194de9213 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-4.1.111.Final.jar.sha1 @@ -0,0 +1 @@ +a6762ec00a6d268f9980741f5b755838bcd658bf \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.110.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.110.Final.jar.sha1 deleted file mode 100644 index 07730a5606ce2..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-http-4.1.110.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9d05cd927209ea25bbf342962c00b8e5a828c2a4 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.111.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.111.Final.jar.sha1 new file mode 100644 index 0000000000000..4ef1adb818300 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-http-4.1.111.Final.jar.sha1 @@ -0,0 +1 @@ +c6ecbc452321e632bf3cea0f9758839b650455c7 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.110.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.110.Final.jar.sha1 deleted file mode 100644 index ebd1e0d52efb2..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-http2-4.1.110.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e0849843eb5b1c036b12551baca98a9f7ff847a0 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 new file mode 100644 index 0000000000000..06c86b8fda557 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 @@ -0,0 +1 @@ +f0cca5df75bfb4f858d0435f601d8b1cae1de054 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-common-4.1.110.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.110.Final.jar.sha1 deleted file mode 100644 index 568c0aa2a2c03..0000000000000 --- a/plugins/repository-s3/licenses/netty-common-4.1.110.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ec361e7e025c029be50c55c8480080cabcbc01e7 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-common-4.1.111.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.111.Final.jar.sha1 new file mode 100644 index 0000000000000..16cb1cce7f504 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-common-4.1.111.Final.jar.sha1 @@ -0,0 +1 @@ +58210befcb31adbcadd5724966a061444db91863 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.110.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.110.Final.jar.sha1 deleted file mode 100644 index 2d6050dd1e3a5..0000000000000 --- a/plugins/repository-s3/licenses/netty-handler-4.1.110.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -168db749c22652ee7fed1ebf7ec46ce856d75e51 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.111.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.111.Final.jar.sha1 new file mode 100644 index 
0000000000000..2f70f791f65ed --- /dev/null +++ b/plugins/repository-s3/licenses/netty-handler-4.1.111.Final.jar.sha1 @@ -0,0 +1 @@ +2bc6a58ad2e9e279634b6e55022e8dcd3c175cc4 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.110.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.110.Final.jar.sha1 deleted file mode 100644 index c3ee8087a8b5d..0000000000000 --- a/plugins/repository-s3/licenses/netty-resolver-4.1.110.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -66c15921104cda0159b34e316541bc765dfaf3c0 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.111.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.111.Final.jar.sha1 new file mode 100644 index 0000000000000..621cbf58f3133 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-resolver-4.1.111.Final.jar.sha1 @@ -0,0 +1 @@ +3493179999f211dc49714319f81da2be86523a3b \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.110.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.110.Final.jar.sha1 deleted file mode 100644 index 32c8fa2b876a2..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-4.1.110.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b91f04c39ac14d6a29d07184ef305953ee6e0348 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.111.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.111.Final.jar.sha1 new file mode 100644 index 0000000000000..ac96e7545ed58 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-4.1.111.Final.jar.sha1 @@ -0,0 +1 @@ +24e97cf14ea9d80afe4c5ab69066b587fccc154a \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.110.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.110.Final.jar.sha1 deleted file mode 100644 index 408f3aa5d1339..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.110.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3ca1cff0bf82bfd38e89f6946e54f24cbb3424a2 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.111.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.111.Final.jar.sha1 new file mode 100644 index 0000000000000..97001777eadf5 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.111.Final.jar.sha1 @@ -0,0 +1 @@ +8b97d32eb1489043e478deea99bd93ce487b82f6 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1 deleted file mode 100644 index 2c468962b1b64..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a7096e7c0a25a983647909d7513f5d4943d589c0 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 new file mode 100644 index 0000000000000..0847ac3034db7 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 @@ -0,0 +1 @@ +acafc128cddafa021bc0b48b0788eb0e118add5e \ No newline at end of file diff --git 
a/plugins/transport-nio/licenses/netty-buffer-4.1.110.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.110.Final.jar.sha1 deleted file mode 100644 index faaf70c858a6e..0000000000000 --- a/plugins/transport-nio/licenses/netty-buffer-4.1.110.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3d918a9ee057d995c362902b54634fc307132aac \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.111.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.111.Final.jar.sha1 new file mode 100644 index 0000000000000..6784ac6c3b64f --- /dev/null +++ b/plugins/transport-nio/licenses/netty-buffer-4.1.111.Final.jar.sha1 @@ -0,0 +1 @@ +b54863f578939e135d3b3aea610284ae57c188cf \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.110.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.110.Final.jar.sha1 deleted file mode 100644 index 7affbc14fa93a..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-4.1.110.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f1fa43b03e93ab88e805b6a4e3e83780c80b47d2 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.111.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.111.Final.jar.sha1 new file mode 100644 index 0000000000000..3d86194de9213 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-4.1.111.Final.jar.sha1 @@ -0,0 +1 @@ +a6762ec00a6d268f9980741f5b755838bcd658bf \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.110.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.110.Final.jar.sha1 deleted file mode 100644 index 07730a5606ce2..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-http-4.1.110.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9d05cd927209ea25bbf342962c00b8e5a828c2a4 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.111.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.111.Final.jar.sha1 new file mode 100644 index 0000000000000..4ef1adb818300 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-http-4.1.111.Final.jar.sha1 @@ -0,0 +1 @@ +c6ecbc452321e632bf3cea0f9758839b650455c7 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.110.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.110.Final.jar.sha1 deleted file mode 100644 index 568c0aa2a2c03..0000000000000 --- a/plugins/transport-nio/licenses/netty-common-4.1.110.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ec361e7e025c029be50c55c8480080cabcbc01e7 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.111.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.111.Final.jar.sha1 new file mode 100644 index 0000000000000..16cb1cce7f504 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-common-4.1.111.Final.jar.sha1 @@ -0,0 +1 @@ +58210befcb31adbcadd5724966a061444db91863 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.110.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.110.Final.jar.sha1 deleted file mode 100644 index 2d6050dd1e3a5..0000000000000 --- a/plugins/transport-nio/licenses/netty-handler-4.1.110.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -168db749c22652ee7fed1ebf7ec46ce856d75e51 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.111.Final.jar.sha1 
b/plugins/transport-nio/licenses/netty-handler-4.1.111.Final.jar.sha1 new file mode 100644 index 0000000000000..2f70f791f65ed --- /dev/null +++ b/plugins/transport-nio/licenses/netty-handler-4.1.111.Final.jar.sha1 @@ -0,0 +1 @@ +2bc6a58ad2e9e279634b6e55022e8dcd3c175cc4 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.110.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.110.Final.jar.sha1 deleted file mode 100644 index c3ee8087a8b5d..0000000000000 --- a/plugins/transport-nio/licenses/netty-resolver-4.1.110.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -66c15921104cda0159b34e316541bc765dfaf3c0 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.111.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.111.Final.jar.sha1 new file mode 100644 index 0000000000000..621cbf58f3133 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-resolver-4.1.111.Final.jar.sha1 @@ -0,0 +1 @@ +3493179999f211dc49714319f81da2be86523a3b \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.110.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.110.Final.jar.sha1 deleted file mode 100644 index 32c8fa2b876a2..0000000000000 --- a/plugins/transport-nio/licenses/netty-transport-4.1.110.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b91f04c39ac14d6a29d07184ef305953ee6e0348 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.111.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.111.Final.jar.sha1 new file mode 100644 index 0000000000000..ac96e7545ed58 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-transport-4.1.111.Final.jar.sha1 @@ -0,0 +1 @@ +24e97cf14ea9d80afe4c5ab69066b587fccc154a \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.110.Final.jar.sha1 deleted file mode 100644 index faaf70c858a6e..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.110.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3d918a9ee057d995c362902b54634fc307132aac \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.111.Final.jar.sha1 new file mode 100644 index 0000000000000..6784ac6c3b64f --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.111.Final.jar.sha1 @@ -0,0 +1 @@ +b54863f578939e135d3b3aea610284ae57c188cf \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.110.Final.jar.sha1 deleted file mode 100644 index 7affbc14fa93a..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.110.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f1fa43b03e93ab88e805b6a4e3e83780c80b47d2 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.111.Final.jar.sha1 new file mode 100644 index 0000000000000..3d86194de9213 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.111.Final.jar.sha1 @@ -0,0 +1 @@ +a6762ec00a6d268f9980741f5b755838bcd658bf \ No newline at end of file diff --git 
a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.110.Final.jar.sha1 deleted file mode 100644 index c4ca8f15e85c5..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.110.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -381c5bf8b7570c163fa7893a26d02b7ac36ff6eb \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.111.Final.jar.sha1 new file mode 100644 index 0000000000000..5e3f819012811 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.111.Final.jar.sha1 @@ -0,0 +1 @@ +f988dbb527efb0e7cf7d444cc50b0fc3f5f380ec \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.110.Final.jar.sha1 deleted file mode 100644 index 07730a5606ce2..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.110.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9d05cd927209ea25bbf342962c00b8e5a828c2a4 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.111.Final.jar.sha1 new file mode 100644 index 0000000000000..4ef1adb818300 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.111.Final.jar.sha1 @@ -0,0 +1 @@ +c6ecbc452321e632bf3cea0f9758839b650455c7 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.110.Final.jar.sha1 deleted file mode 100644 index ebd1e0d52efb2..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.110.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e0849843eb5b1c036b12551baca98a9f7ff847a0 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 new file mode 100644 index 0000000000000..06c86b8fda557 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 @@ -0,0 +1 @@ +f0cca5df75bfb4f858d0435f601d8b1cae1de054 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.110.Final.jar.sha1 deleted file mode 100644 index 568c0aa2a2c03..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.110.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ec361e7e025c029be50c55c8480080cabcbc01e7 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.111.Final.jar.sha1 new file mode 100644 index 0000000000000..16cb1cce7f504 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.111.Final.jar.sha1 @@ -0,0 +1 @@ +58210befcb31adbcadd5724966a061444db91863 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.110.Final.jar.sha1 deleted file mode 100644 index 
2d6050dd1e3a5..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.110.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -168db749c22652ee7fed1ebf7ec46ce856d75e51 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.111.Final.jar.sha1 new file mode 100644 index 0000000000000..2f70f791f65ed --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.111.Final.jar.sha1 @@ -0,0 +1 @@ +2bc6a58ad2e9e279634b6e55022e8dcd3c175cc4 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.110.Final.jar.sha1 deleted file mode 100644 index c3ee8087a8b5d..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.110.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -66c15921104cda0159b34e316541bc765dfaf3c0 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.111.Final.jar.sha1 new file mode 100644 index 0000000000000..621cbf58f3133 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.111.Final.jar.sha1 @@ -0,0 +1 @@ +3493179999f211dc49714319f81da2be86523a3b \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.110.Final.jar.sha1 deleted file mode 100644 index 18d122acd2c44..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.110.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3e687cdc4ecdbbad07508a11b715bdf95fa20939 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.111.Final.jar.sha1 new file mode 100644 index 0000000000000..b22ad6784809b --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.111.Final.jar.sha1 @@ -0,0 +1 @@ +5ac6a3d96935129ba45ea768ad30e31cad0d8c4d \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.110.Final.jar.sha1 deleted file mode 100644 index 32c8fa2b876a2..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.110.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b91f04c39ac14d6a29d07184ef305953ee6e0348 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.111.Final.jar.sha1 new file mode 100644 index 0000000000000..ac96e7545ed58 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.111.Final.jar.sha1 @@ -0,0 +1 @@ +24e97cf14ea9d80afe4c5ab69066b587fccc154a \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1 deleted file mode 100644 index 2c468962b1b64..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1 +++ /dev/null @@ 
-1 +0,0 @@ -a7096e7c0a25a983647909d7513f5d4943d589c0 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 new file mode 100644 index 0000000000000..0847ac3034db7 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 @@ -0,0 +1 @@ +acafc128cddafa021bc0b48b0788eb0e118add5e \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.19.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.19.jar.sha1 deleted file mode 100644 index cbcbfcd87d682..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.19.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -639e2c63ade6f2a49d7e501ca2264b74d240b448 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.20.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.20.jar.sha1 new file mode 100644 index 0000000000000..2f4d023c88c80 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.20.jar.sha1 @@ -0,0 +1 @@ +1a5ef52a470a82d9313e2e1ad8ba064bdbd38948 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.19.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.19.jar.sha1 deleted file mode 100644 index 1eeedfc0926f5..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.19.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b4bbb1aeb64ecb2b3949c38983032a7f0b0ebd07 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.20.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.20.jar.sha1 new file mode 100644 index 0000000000000..6c031e00e39c1 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.20.jar.sha1 @@ -0,0 +1 @@ +8d4ee98405a5856cf0c9d7c1a70f3f14631e3c46 \ No newline at end of file diff --git a/release-notes/opensearch.release-notes-2.15.0.md b/release-notes/opensearch.release-notes-2.15.0.md new file mode 100644 index 0000000000000..e3b7cfc0558f3 --- /dev/null +++ b/release-notes/opensearch.release-notes-2.15.0.md @@ -0,0 +1,75 @@ +## 2024-06-12 Version 2.15.0 Release Notes + +## [2.15.0] +### Added +- Add leader and follower check failure counter metrics ([#12439](https://github.com/opensearch-project/OpenSearch/pull/12439)) +- Add latency metrics for instrumenting critical clusterManager code paths ([#12333](https://github.com/opensearch-project/OpenSearch/pull/12333)) +- Add support for Azure Managed Identity in repository-azure ([#12423](https://github.com/opensearch-project/OpenSearch/issues/12423)) +- Add useCompoundFile index setting ([#13478](https://github.com/opensearch-project/OpenSearch/pull/13478)) +- Make outbound side of transport protocol dependent ([#13293](https://github.com/opensearch-project/OpenSearch/pull/13293)) +- [Remote Store] Upload translog checkpoint as object metadata to translog.tlog([#13637](https://github.com/opensearch-project/OpenSearch/pull/13637)) +- [Remote Store] Add dynamic cluster settings to set timeout for segments upload to Remote Store ([#13679](https://github.com/opensearch-project/OpenSearch/pull/13679)) +- Add getMetadataFields to MapperService 
([#13819](https://github.com/opensearch-project/OpenSearch/pull/13819)) +- Add "wildcard" field type that supports efficient wildcard, prefix, and regexp queries ([#13461](https://github.com/opensearch-project/OpenSearch/pull/13461)) +- Allow setting query parameters on requests ([#13776](https://github.com/opensearch-project/OpenSearch/issues/13776)) +- Add dynamic action retry timeout setting ([#14022](https://github.com/opensearch-project/OpenSearch/issues/14022)) +- Add capability to disable source recovery_source for an index ([#13590](https://github.com/opensearch-project/OpenSearch/pull/13590)) +- Add remote routing table for remote state publication with experimental feature flag ([#13304](https://github.com/opensearch-project/OpenSearch/pull/13304)) +- Add upload flow for writing routing table to remote store ([#13870](https://github.com/opensearch-project/OpenSearch/pull/13870)) +- [Remote Store] Add support to disable flush based on translog reader count ([#14027](https://github.com/opensearch-project/OpenSearch/pull/14027)) +- Add recovery chunk size setting ([#13997](https://github.com/opensearch-project/OpenSearch/pull/13997)) +- [Query Insights] Add exporter support for top n queries ([#12982](https://github.com/opensearch-project/OpenSearch/pull/12982)) +- [Query Insights] Add X-Opaque-Id to search request metadata for top n queries ([#13374](https://github.com/opensearch-project/OpenSearch/pull/13374)) +- [Streaming Indexing] Enhance RestAction with request / response streaming support ([#13772](https://github.com/opensearch-project/OpenSearch/pull/13772)) +- Move Remote Store Migration from DocRep to GA and modify remote migration settings name ([#14100](https://github.com/opensearch-project/OpenSearch/pull/14100)) +- [Remote State] Add async remote state deletion task running on an interval, configurable by a setting ([#13995](https://github.com/opensearch-project/OpenSearch/pull/13995)) +- Add support for query level resource usage tracking ([#13172](https://github.com/opensearch-project/OpenSearch/pull/13172)) +- [Query Insights] Add cpu and memory metrics to top n queries ([#13739](https://github.com/opensearch-project/OpenSearch/pull/13739)) +- Derived field object type support ([#13720](https://github.com/opensearch-project/OpenSearch/pull/13720)) +- Support Dynamic Pruning in Cardinality Aggregation ([#13821](https://github.com/opensearch-project/OpenSearch/pull/13821)) + +### Dependencies +- Bump `com.github.spullara.mustache.java:compiler` from 0.9.10 to 0.9.13 ([#13329](https://github.com/opensearch-project/OpenSearch/pull/13329), [#13559](https://github.com/opensearch-project/OpenSearch/pull/13559)) +- Bump `org.apache.commons:commons-text` from 1.11.0 to 1.12.0 ([#13557](https://github.com/opensearch-project/OpenSearch/pull/13557)) +- Bump `org.hdrhistogram:HdrHistogram` from 2.1.12 to 2.2.2 ([#13556](https://github.com/opensearch-project/OpenSearch/pull/13556), [#13986](https://github.com/opensearch-project/OpenSearch/pull/13986)) +- Bump `com.gradle.enterprise` from 3.17.2 to 3.17.4 ([#13641](https://github.com/opensearch-project/OpenSearch/pull/13641), [#13753](https://github.com/opensearch-project/OpenSearch/pull/13753)) +- Bump `org.apache.hadoop:hadoop-minicluster` from 3.3.6 to
3.4.0 ([#13642](https://github.com/opensearch-project/OpenSearch/pull/13642)) +- Bump `mockito` from 5.11.0 to 5.12.0 ([#13665](https://github.com/opensearch-project/OpenSearch/pull/13665)) +- Bump `com.google.code.gson:gson` from 2.10.1 to 2.11.0 ([#13752](https://github.com/opensearch-project/OpenSearch/pull/13752)) +- Bump `ch.qos.logback:logback-core` from 1.5.3 to 1.5.6 ([#13756](https://github.com/opensearch-project/OpenSearch/pull/13756)) +- Bump `netty` from 4.1.109.Final to 4.1.110.Final ([#13802](https://github.com/opensearch-project/OpenSearch/pull/13802)) +- Bump `jackson` from 2.17.0 to 2.17.1 ([#13817](https://github.com/opensearch-project/OpenSearch/pull/13817)) +- Bump `reactor` from 3.5.15 to 3.5.17 ([#13825](https://github.com/opensearch-project/OpenSearch/pull/13825)) +- Bump `reactor-netty` from 1.1.17 to 1.1.19 ([#13825](https://github.com/opensearch-project/OpenSearch/pull/13825)) +- Bump `commons-cli:commons-cli` from 1.7.0 to 1.8.0 ([#13840](https://github.com/opensearch-project/OpenSearch/pull/13840)) +- Bump `org.apache.xmlbeans:xmlbeans` from 5.2.0 to 5.2.1 ([#13839](https://github.com/opensearch-project/OpenSearch/pull/13839)) +- Bump `actions/checkout` from 3 to 4 ([#13935](https://github.com/opensearch-project/OpenSearch/pull/13935)) +- Bump `com.netflix.nebula.ospackage-base` from 11.9.0 to 11.9.1 ([#13933](https://github.com/opensearch-project/OpenSearch/pull/13933)) +- Bump `com.azure:azure-core-http-netty` from 1.12.8 to 1.15.1 ([#14128](https://github.com/opensearch-project/OpenSearch/pull/14128)) +- Bump `tim-actions/get-pr-commits` from 1.1.0 to 1.3.1 ([#14126](https://github.com/opensearch-project/OpenSearch/pull/14126)) + +### Changed +- Add ability for Boolean and date field queries to run when only doc_values are enabled ([#11650](https://github.com/opensearch-project/OpenSearch/pull/11650)) +- Refactor implementations of query phase searcher, allow QueryCollectorContext to have zero collectors ([#13481](https://github.com/opensearch-project/OpenSearch/pull/13481)) +- Add support to inject telemetry instances to plugins ([#13636](https://github.com/opensearch-project/OpenSearch/pull/13636)) +- Add support to provide tags with value in Gauge metric ([#13994](https://github.com/opensearch-project/OpenSearch/pull/13994)) +- Move cache removal notifications outside LRU lock ([#14017](https://github.com/opensearch-project/OpenSearch/pull/14017)) + +### Removed +- Remove handling of index.mapper.dynamic in AutoCreateIndex ([#13067](https://github.com/opensearch-project/OpenSearch/pull/13067)) + +### Fixed +- Fix get field mapping API returns 404 error in mixed cluster with multiple versions ([#13624](https://github.com/opensearch-project/OpenSearch/pull/13624)) +- Allow clearing `remote_store.compatibility_mode` setting ([#13646](https://github.com/opensearch-project/OpenSearch/pull/13646)) +- Painless: ensure type "UnmodifiableMap" for params ([#13885](https://github.com/opensearch-project/OpenSearch/pull/13885)) +- Don't return negative scores from `multi_match` query with `cross_fields` type ([#13829](https://github.com/opensearch-project/OpenSearch/pull/13829)) +- Pass parent filter to inner hit query ([#13903](https://github.com/opensearch-project/OpenSearch/pull/13903)) +- Fix NPE on restore searchable snapshot ([#13911](https://github.com/opensearch-project/OpenSearch/pull/13911)) +- Fix double invocation of postCollection when MultiBucketCollector is present ([#14015](https://github.com/opensearch-project/OpenSearch/pull/14015)) +- Fix ReplicaShardBatchAllocator to batch shards without duplicates ([#13710](https://github.com/opensearch-project/OpenSearch/pull/13710)) +- Java high-level REST client bulk() is not respecting the bulkRequest.requireAlias(true) method call ([#14146](https://github.com/opensearch-project/OpenSearch/pull/14146)) +- Fix ShardNotFoundException during request cache clean up ([#14219](https://github.com/opensearch-project/OpenSearch/pull/14219)) +- Fix Concurrent Modification Exception in Indices Request Cache ([#14032](https://github.com/opensearch-project/OpenSearch/pull/14221)) +- Fix the rewrite method for MatchOnlyText field query ([#14248](https://github.com/opensearch-project/OpenSearch/pull/14248)) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/10_histogram.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/10_histogram.yml index fa71137912a91..996c2aae8cfe4 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/10_histogram.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/10_histogram.yml @@ -658,6 +658,7 @@ setup: settings: number_of_replicas: 0 number_of_shards: 1 + refresh_interval: -1 mappings: properties: date: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml index 3a0099dae3b33..78e2e6858c6ff 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml @@ -1083,6 +1083,7 @@ setup: settings: number_of_replicas: 0 number_of_shards: 1 + refresh_interval: -1 mappings: properties: date: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/330_auto_date_histogram.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/330_auto_date_histogram.yml index 1356eac41ae79..fc82517788c91 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/330_auto_date_histogram.yml +++
b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/330_auto_date_histogram.yml @@ -106,8 +106,36 @@ setup: version: " - 2.99.99" reason: debug info for filter rewrite added in 3.0.0 (to be backported to 2.14.0) + - do: + indices.create: + index: test_profile + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + refresh_interval: -1 + mappings: + properties: + date: + type: date + + - do: + bulk: + index: test_profile + refresh: true + body: + - '{"index": {}}' + - '{"date": "2020-03-01", "v": 1}' + - '{"index": {}}' + - '{"date": "2020-03-02", "v": 2}' + - '{"index": {}}' + - '{"date": "2020-03-08", "v": 3}' + - '{"index": {}}' + - '{"date": "2020-03-09", "v": 4}' + - do: search: + index: test_profile body: profile: true size: 0 diff --git a/server/build.gradle b/server/build.gradle index 624e5fe332662..b8a99facbf964 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -356,14 +356,18 @@ tasks.named("thirdPartyAudit").configure { } tasks.named("dependencyLicenses").configure { + mapping from: /jackson-.*/, to: 'jackson' mapping from: /reactor-.*/, to: 'reactor' mapping from: /lucene-.*/, to: 'lucene' - dependencies = project.configurations.runtimeClasspath.fileCollection { - it.group.startsWith('org.opensearch') == false || - // keep the following org.opensearch jars in - (it.name == 'jna' || - it.name == 'securesm') - } + dependencies = project.configurations.runtimeClasspath.incoming.artifactView { + componentFilter { + it instanceof ModuleComponentIdentifier && + (it.group.startsWith('org.opensearch') == false || + // keep the following org.opensearch jars in + (it.name == 'jna' || + it.name == 'securesm')) + } + }.files } tasks.named("filepermissions").configure { diff --git a/server/licenses/jackson-LICENSE b/server/licenses/jackson-LICENSE new file mode 100644 index 0000000000000..f5f45d26a49d6 --- /dev/null +++ b/server/licenses/jackson-LICENSE @@ -0,0 +1,8 @@ +This copy of Jackson JSON processor streaming parser/generator is licensed under the +Apache (Software) License, version 2.0 ("the License"). +See the License for details about distribution rights, and the +specific rights regarding derivate works. + +You may obtain a copy of the License at: + +http://www.apache.org/licenses/LICENSE-2.0 diff --git a/server/licenses/jackson-NOTICE b/server/licenses/jackson-NOTICE new file mode 100644 index 0000000000000..4c976b7b4cc58 --- /dev/null +++ b/server/licenses/jackson-NOTICE @@ -0,0 +1,20 @@ +# Jackson JSON processor + +Jackson is a high-performance, Free/Open Source JSON processing library. +It was originally written by Tatu Saloranta (tatu.saloranta@iki.fi), and has +been in development since 2007. +It is currently developed by a community of developers, as well as supported +commercially by FasterXML.com. + +## Licensing + +Jackson core and extension components may licensed under different licenses. +To find the details that apply to this artifact see the accompanying LICENSE file. +For more information, including possible other licensing options, contact +FasterXML.com (http://fasterxml.com). + +## Credits + +A list of contributors may be found from CREDITS file, which is included +in some artifacts (usually source distributions); but is always available +from the source code management (SCM) system project uses. 
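The dependencyLicenses change in server/build.gradle above swaps a configuration-time fileCollection filter for Gradle's artifactView API, which selects resolved artifacts by component identifier. The following is a minimal, hypothetical sketch of the same pattern in a Groovy DSL build script; the task name listExternalRuntimeJars is invented for illustration, and id.module is used here where the change above accesses it.name:

import org.gradle.api.artifacts.component.ModuleComponentIdentifier

// Sketch only: lists the external runtime jars selected by the same
// componentFilter logic that the dependencyLicenses change uses above.
tasks.register('listExternalRuntimeJars') {
    def externalJars = configurations.runtimeClasspath.incoming.artifactView {
        componentFilter { id ->
            // keep external modules, plus the org.opensearch jna and securesm jars
            id instanceof ModuleComponentIdentifier &&
                (id.group.startsWith('org.opensearch') == false
                    || id.module == 'jna'
                    || id.module == 'securesm')
        }
    }.files
    doLast {
        externalJars.each { println it.name }
    }
}

Filtering on ModuleComponentIdentifier also skips project and plain-file components, so only jars resolved from module repositories are considered.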
diff --git a/server/licenses/jackson-core-2.17.1.jar.sha1 b/server/licenses/jackson-core-2.17.1.jar.sha1 new file mode 100644 index 0000000000000..82dab5981e652 --- /dev/null +++ b/server/licenses/jackson-core-2.17.1.jar.sha1 @@ -0,0 +1 @@ +5e52a11644cd59a28ef79f02bddc2cc3bab45edb \ No newline at end of file diff --git a/server/licenses/jackson-dataformat-cbor-2.17.1.jar.sha1 b/server/licenses/jackson-dataformat-cbor-2.17.1.jar.sha1 new file mode 100644 index 0000000000000..ff42ed1f92cfe --- /dev/null +++ b/server/licenses/jackson-dataformat-cbor-2.17.1.jar.sha1 @@ -0,0 +1 @@ +ba5d8e6ecc62aa0e49c0ce935b8696352dbebc71 \ No newline at end of file diff --git a/server/licenses/jackson-dataformat-smile-2.17.1.jar.sha1 b/server/licenses/jackson-dataformat-smile-2.17.1.jar.sha1 new file mode 100644 index 0000000000000..47d19067cf2a6 --- /dev/null +++ b/server/licenses/jackson-dataformat-smile-2.17.1.jar.sha1 @@ -0,0 +1 @@ +89683ac4f0a0c2c4f69ea56b90480ed40266dac8 \ No newline at end of file diff --git a/server/licenses/jackson-dataformat-yaml-2.17.1.jar.sha1 b/server/licenses/jackson-dataformat-yaml-2.17.1.jar.sha1 new file mode 100644 index 0000000000000..7946e994c7104 --- /dev/null +++ b/server/licenses/jackson-dataformat-yaml-2.17.1.jar.sha1 @@ -0,0 +1 @@ +b4c7b8a9ea3f398116a75c146b982b22afebc4ee \ No newline at end of file diff --git a/server/licenses/jopt-simple-5.0.4.jar.sha1 b/server/licenses/jopt-simple-5.0.4.jar.sha1 new file mode 100644 index 0000000000000..7ade81efe4d0d --- /dev/null +++ b/server/licenses/jopt-simple-5.0.4.jar.sha1 @@ -0,0 +1 @@ +4fdac2fbe92dfad86aa6e9301736f6b4342a3f5c \ No newline at end of file diff --git a/server/licenses/jopt-simple-LICENSE.txt b/server/licenses/jopt-simple-LICENSE.txt new file mode 100644 index 0000000000000..85f923a95268a --- /dev/null +++ b/server/licenses/jopt-simple-LICENSE.txt @@ -0,0 +1,24 @@ +/* + The MIT License + + Copyright (c) 2004-2015 Paul R. Holser, Jr. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ diff --git a/server/licenses/jopt-simple-NOTICE.txt b/server/licenses/jopt-simple-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/server/licenses/reactor-core-3.5.17.jar.sha1 b/server/licenses/reactor-core-3.5.17.jar.sha1 deleted file mode 100644 index 6663356bab047..0000000000000 --- a/server/licenses/reactor-core-3.5.17.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2cf9b080e3a2d8a5a39948260db5fd1dae54c3ac \ No newline at end of file diff --git a/server/licenses/reactor-core-3.5.18.jar.sha1 b/server/licenses/reactor-core-3.5.18.jar.sha1 new file mode 100644 index 0000000000000..c503f768beafa --- /dev/null +++ b/server/licenses/reactor-core-3.5.18.jar.sha1 @@ -0,0 +1 @@ +3a8157f7d66d71a407eb77ba12bce72a38c5b4da \ No newline at end of file diff --git a/server/licenses/snakeyaml-2.1.jar.sha1 b/server/licenses/snakeyaml-2.1.jar.sha1 new file mode 100644 index 0000000000000..5586b210a9736 --- /dev/null +++ b/server/licenses/snakeyaml-2.1.jar.sha1 @@ -0,0 +1 @@ +c79f47315517560b5bd6a62376ee385e48105437 \ No newline at end of file diff --git a/server/licenses/snakeyaml-LICENSE.txt b/server/licenses/snakeyaml-LICENSE.txt new file mode 100644 index 0000000000000..d9a10c0d8e868 --- /dev/null +++ b/server/licenses/snakeyaml-LICENSE.txt @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/server/licenses/snakeyaml-NOTICE.txt b/server/licenses/snakeyaml-NOTICE.txt new file mode 100644 index 0000000000000..b51464eee1f00 --- /dev/null +++ b/server/licenses/snakeyaml-NOTICE.txt @@ -0,0 +1,24 @@ +***The art of simplicity is a puzzle of complexity.*** + +## Overview ## +[YAML](http://yaml.org) is a data serialization format designed for human readability and interaction with scripting languages. + +SnakeYAML is a YAML processor for the Java Virtual Machine. + +## SnakeYAML features ## + +* a **complete** [YAML 1.1 processor](http://yaml.org/spec/1.1/current.html). In particular, SnakeYAML can parse all examples from the specification. +* Unicode support including UTF-8/UTF-16 input/output. +* high-level API for serializing and deserializing native Java objects. +* support for all types from the [YAML types repository](http://yaml.org/type/index.html). +* relatively sensible error messages. + +## Info ## + * [Changes](https://bitbucket.org/asomov/snakeyaml/wiki/Changes) + * [Documentation](https://bitbucket.org/asomov/snakeyaml/wiki/Documentation) + +## Contribute ## +* Mercurial DVCS is used to dance with the [source code](https://bitbucket.org/asomov/snakeyaml/src). +* If you find a bug in SnakeYAML, please [file a bug report](https://bitbucket.org/asomov/snakeyaml/issues?status=new&status=open). +* You may discuss SnakeYAML at +[the mailing list](http://groups.google.com/group/snakeyaml-core). \ No newline at end of file diff --git a/server/licenses/zstd-jni-1.5.5-5.jar.sha1 b/server/licenses/zstd-jni-1.5.5-5.jar.sha1 new file mode 100644 index 0000000000000..498c60c34e3da --- /dev/null +++ b/server/licenses/zstd-jni-1.5.5-5.jar.sha1 @@ -0,0 +1 @@ +74ffdc5f140080adacf5278287aadd950179f848 \ No newline at end of file diff --git a/server/licenses/zstd-jni-LICENSE.txt b/server/licenses/zstd-jni-LICENSE.txt new file mode 100644 index 0000000000000..c4dd507c1c72f --- /dev/null +++ b/server/licenses/zstd-jni-LICENSE.txt @@ -0,0 +1,29 @@ +----------------------------------------------------------------------------- +** Beginning of "BSD License" text. 
** + +Zstd-jni: JNI bindings to Zstd Library + +Copyright (c) 2015-present, Luben Karavelov/ All rights reserved. + +BSD License + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/server/licenses/zstd-jni-NOTICE.txt b/server/licenses/zstd-jni-NOTICE.txt new file mode 100644 index 0000000000000..389c97cbc892d --- /dev/null +++ b/server/licenses/zstd-jni-NOTICE.txt @@ -0,0 +1 @@ +The code for the JNI bindings to Zstd library was originally authored by Luben Karavelov diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java index b33d57ed43189..beed6e6846b46 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java @@ -539,18 +539,7 @@ private void assertNodesRemovedAfterZoneDecommission(boolean originalClusterMana assertEquals(originalClusterManager, currentClusterManager); } - // Will wait for all events to complete - client(activeNode).admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get(); - - // Recommissioning the zone back to gracefully succeed the test once above tests succeeds - DeleteDecommissionStateResponse deleteDecommissionStateResponse = client(currentClusterManager).execute( - DeleteDecommissionStateAction.INSTANCE, - new DeleteDecommissionStateRequest() - ).get(); - assertTrue(deleteDecommissionStateResponse.isAcknowledged()); - - // will wait for cluster to stabilise with a timeout of 2 min as by then all nodes should have joined the cluster - ensureStableCluster(15, TimeValue.timeValueMinutes(2)); + deleteDecommissionStateAndWaitForStableCluster(currentClusterManager, 15); } public void testDecommissionFailedWhenDifferentAttributeAlreadyDecommissioned() throws Exception { @@ -617,18 +606,7 @@ public void testDecommissionFailedWhenDifferentAttributeAlreadyDecommissioned() ) ); - // Will wait for all events to complete - client(node_in_c).admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get(); - - // Recommissioning the zone back to gracefully succeed the test once above tests succeeds 
- DeleteDecommissionStateResponse deleteDecommissionStateResponse = client(node_in_c).execute( - DeleteDecommissionStateAction.INSTANCE, - new DeleteDecommissionStateRequest() - ).get(); - assertTrue(deleteDecommissionStateResponse.isAcknowledged()); - - // will wait for cluster to stabilise with a timeout of 2 min as by then all nodes should have joined the cluster - ensureStableCluster(6, TimeValue.timeValueMinutes(2)); + deleteDecommissionStateAndWaitForStableCluster(node_in_c, 6); } public void testDecommissionStatusUpdatePublishedToAllNodes() throws ExecutionException, InterruptedException { @@ -748,20 +726,7 @@ public void testDecommissionStatusUpdatePublishedToAllNodes() throws ExecutionEx ); logger.info("--> Verified the decommissioned node has in_progress state."); - // Will wait for all events to complete - client(activeNode).admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get(); - logger.info("--> Got LANGUID event"); - // Recommissioning the zone back to gracefully succeed the test once above tests succeeds - DeleteDecommissionStateResponse deleteDecommissionStateResponse = client(activeNode).execute( - DeleteDecommissionStateAction.INSTANCE, - new DeleteDecommissionStateRequest() - ).get(); - assertTrue(deleteDecommissionStateResponse.isAcknowledged()); - logger.info("--> Deleting decommission done."); - - // will wait for cluster to stabilise with a timeout of 2 min (findPeerInterval for decommissioned nodes) - // as by then all nodes should have joined the cluster - ensureStableCluster(6, TimeValue.timeValueSeconds(121)); + deleteDecommissionStateAndWaitForStableCluster(activeNode, 6); } public void testDecommissionFailedWhenAttributeNotWeighedAway() throws Exception { @@ -983,15 +948,7 @@ public void testDecommissionAcknowledgedIfWeightsNotSetForNonRoutingNode() throw assertEquals(clusterState.nodes().getDataNodes().size(), 3); assertEquals(clusterState.nodes().getClusterManagerNodes().size(), 2); - // Recommissioning the zone back to gracefully succeed the test once above tests succeeds - DeleteDecommissionStateResponse deleteDecommissionStateResponse = client(dataNodes.get(0)).execute( - DeleteDecommissionStateAction.INSTANCE, - new DeleteDecommissionStateRequest() - ).get(); - assertTrue(deleteDecommissionStateResponse.isAcknowledged()); - - // will wait for cluster to stabilise with a timeout of 2 min as by then all nodes should have joined the cluster - ensureStableCluster(6, TimeValue.timeValueMinutes(2)); + deleteDecommissionStateAndWaitForStableCluster(dataNodes.get(0), 6); } public void testConcurrentDecommissionAction() throws Exception { @@ -1019,7 +976,7 @@ public void testConcurrentDecommissionAction() throws Exception { .build() ); logger.info("--> start 3 data nodes on zones 'a' & 'b' & 'c'"); - internalCluster().startNodes( + final String bZoneDataNode = internalCluster().startNodes( Settings.builder() .put(commonSettings) .put("node.attr.zone", "a") @@ -1035,7 +992,7 @@ public void testConcurrentDecommissionAction() throws Exception { .put("node.attr.zone", "c") .put(onlyRole(commonSettings, DiscoveryNodeRole.DATA_ROLE)) .build() - ); + ).get(1); ensureStableCluster(6); ClusterHealthResponse health = client().admin() @@ -1100,6 +1057,25 @@ public void testConcurrentDecommissionAction() throws Exception { assertEquals(concurrentRuns, numRequestAcknowledged.get() + numRequestUnAcknowledged.get() + numRequestFailed.get()); assertEquals(concurrentRuns - 1, numRequestFailed.get()); assertEquals(1, numRequestAcknowledged.get() + 
numRequestUnAcknowledged.get()); + + deleteDecommissionStateAndWaitForStableCluster(bZoneDataNode, 6); + } + + private void deleteDecommissionStateAndWaitForStableCluster(String activeNodeName, int expectedClusterSize) throws ExecutionException, + InterruptedException { + client(activeNodeName).admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get(); + + // Recommission the zone so each test can conclude gracefully once the assertions above have passed + DeleteDecommissionStateResponse deleteDecommissionStateResponse = client(activeNodeName).execute( + DeleteDecommissionStateAction.INSTANCE, + new DeleteDecommissionStateRequest() + ).get(); + assertTrue(deleteDecommissionStateResponse.isAcknowledged()); + logger.info("--> Decommission state deleted."); + + // Wait for the cluster to stabilize within 2 minutes (the findPeerInterval for decommissioned nodes), + // by which time all nodes should have rejoined the cluster + ensureStableCluster(expectedClusterSize, TimeValue.timeValueSeconds(121)); } private static class WaitForFailedDecommissionState implements ClusterStateObserver.Listener { diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java index bc0557ddc2afa..fc0a574c191b1 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java @@ -34,6 +34,7 @@ import org.apache.lucene.index.CorruptIndexException; import org.opensearch.Version; +import org.opensearch.action.admin.cluster.allocation.ClusterAllocationExplainResponse; import org.opensearch.action.admin.cluster.configuration.AddVotingConfigExclusionsAction; import org.opensearch.action.admin.cluster.configuration.AddVotingConfigExclusionsRequest; import org.opensearch.action.admin.cluster.configuration.ClearVotingConfigExclusionsAction; @@ -55,7 +56,9 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.cluster.routing.UnassignedInfo; +import org.opensearch.cluster.routing.allocation.AllocationDecision; import org.opensearch.cluster.routing.allocation.ExistingShardsAllocator; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.Settings; @@ -98,6 +101,7 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.ExecutionException; +import java.util.stream.Collectors; import java.util.stream.IntStream; import static java.util.Collections.emptyMap; @@ -105,8 +109,10 @@ import static org.opensearch.cluster.coordination.ClusterBootstrapService.INITIAL_CLUSTER_MANAGER_NODES_SETTING; import static org.opensearch.cluster.health.ClusterHealthStatus.GREEN; import static org.opensearch.cluster.health.ClusterHealthStatus.RED; +import static org.opensearch.cluster.health.ClusterHealthStatus.YELLOW; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; +import static org.opensearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.gateway.GatewayRecoveryTestUtils.corruptShard; import static
org.opensearch.gateway.GatewayRecoveryTestUtils.getDiscoveryNodes; @@ -753,6 +759,7 @@ public void testMessyElectionsStillMakeClusterGoGreen() throws Exception { Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + // keep the delayed-allocation timeout short (100ms, below) so unassigned shards reassign promptly .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "100ms") .build() ); @@ -843,6 +850,87 @@ public void testBatchModeDisabled() throws Exception { ensureGreen("test"); } + public void testMultipleReplicaShardAssignmentWithDelayedAllocationAndDifferentNodeStartTimeInBatchMode() throws Exception { + internalCluster().startClusterManagerOnlyNodes( + 1, + Settings.builder().put(ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_BATCH_MODE.getKey(), true).build() + ); + internalCluster().startDataOnlyNodes(6); + createIndex( + "test", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 3) + .put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "60m") + .build() + ); + ensureGreen("test"); + + List<String> nodesWithReplicaShards = findNodesWithShard(false); + Settings replicaNode0DataPathSettings = internalCluster().dataPathSettings(nodesWithReplicaShards.get(0)); + Settings replicaNode1DataPathSettings = internalCluster().dataPathSettings(nodesWithReplicaShards.get(1)); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodesWithReplicaShards.get(0))); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodesWithReplicaShards.get(1))); + + ensureStableCluster(5); + + logger.info("--> explicitly triggering reroute"); + ClusterRerouteResponse clusterRerouteResponse = client().admin().cluster().prepareReroute().setRetryFailed(true).get(); + assertTrue(clusterRerouteResponse.isAcknowledged()); + + ClusterHealthResponse health = client().admin().cluster().health(Requests.clusterHealthRequest().timeout("5m")).actionGet(); + assertFalse(health.isTimedOut()); + assertEquals(YELLOW, health.getStatus()); + assertEquals(2, health.getUnassignedShards()); + // shards should stay unassigned because their allocation is delayed (ALLOCATION_DELAYED) + ClusterAllocationExplainResponse allocationExplainResponse = client().admin() + .cluster() + .prepareAllocationExplain() + .setIndex("test") + .setShard(0) + .setPrimary(false) + .get(); + assertEquals( + AllocationDecision.ALLOCATION_DELAYED, + allocationExplainResponse.getExplanation().getShardAllocationDecision().getAllocateDecision().getAllocationDecision() + ); + + logger.info("--> restarting the first stopped node"); + internalCluster().startDataOnlyNode( + Settings.builder().put("node.name", nodesWithReplicaShards.get(0)).put(replicaNode0DataPathSettings).build() + ); + clusterRerouteResponse = client().admin().cluster().prepareReroute().setRetryFailed(true).get(); + assertTrue(clusterRerouteResponse.isAcknowledged()); + ensureStableCluster(6); + waitUntil( + () -> client().admin().cluster().health(Requests.clusterHealthRequest().timeout("5m")).actionGet().getInitializingShards() == 0 + ); + + health = client().admin().cluster().health(Requests.clusterHealthRequest().timeout("5m")).actionGet(); + assertFalse(health.isTimedOut()); + assertEquals(YELLOW, health.getStatus()); + assertEquals(1, health.getUnassignedShards()); + assertEquals(1, health.getDelayedUnassignedShards()); + allocationExplainResponse = client().admin() + .cluster() + .prepareAllocationExplain() + .setIndex("test") + .setShard(0) + .setPrimary(false) + .get(); + assertEquals( +
AllocationDecision.ALLOCATION_DELAYED, + allocationExplainResponse.getExplanation().getShardAllocationDecision().getAllocateDecision().getAllocationDecision() + ); + + logger.info("--> restarting the second stopped node"); + internalCluster().startDataOnlyNode( + Settings.builder().put("node.name", nodesWithReplicaShards.get(1)).put(replicaNode1DataPathSettings).build() + ); + ensureStableCluster(7); + ensureGreen("test"); + } + public void testNBatchesCreationAndAssignment() throws Exception { // we will reduce batch size to 5 to make sure we have enough batches to test assignment // Total number of primary shards = 50 (50 indices*1) @@ -1293,4 +1381,14 @@ private void prepareIndex(String indexName, int numberOfPrimaryShards) { index(indexName, "type", "1", Collections.emptyMap()); flush(indexName); } + + private List<String> findNodesWithShard(final boolean primary) { + ClusterState state = client().admin().cluster().prepareState().get().getState(); + List<ShardRouting> startedShards = state.routingTable().shardsWithState(ShardRoutingState.STARTED); + List<ShardRouting> requiredStartedShards = startedShards.stream() + .filter(startedShard -> startedShard.primary() == primary) + .collect(Collectors.toList()); + Collections.shuffle(requiredStartedShards, random()); + return requiredStartedShards.stream().map(shard -> state.nodes().get(shard.currentNodeId()).getName()).collect(Collectors.toList()); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManagerIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManagerIT.java index e96dedaa3e6a0..5074971ab1a1f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManagerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManagerIT.java @@ -108,6 +108,9 @@ public void testRemoteCleanupDeleteStale() throws Exception { .add("cluster-state") .add(getClusterState().metadata().clusterUUID()); BlobPath manifestContainerPath = baseMetadataPath.add("manifest"); + RemoteClusterStateCleanupManager remoteClusterStateCleanupManager = internalCluster().getClusterManagerNodeInstance( + RemoteClusterStateCleanupManager.class + ); // set cleanup interval to 100 ms to make the test faster ClusterUpdateSettingsResponse response = client().admin() @@ -117,6 +120,7 @@ .get(); assertTrue(response.isAcknowledged()); + assertBusy(() -> assertEquals(100, remoteClusterStateCleanupManager.getStaleFileDeletionTask().getInterval().getMillis())); assertBusy(() -> { int manifestFiles = repository.blobStore().blobContainer(manifestContainerPath).listBlobsByPrefix("manifest").size(); @@ -128,7 +132,7 @@ "Current number of manifest files: " + manifestFiles, manifestFiles >= RETAINED_MANIFESTS && manifestFiles < RETAINED_MANIFESTS + 2 * SKIP_CLEANUP_STATE_CHANGES ); - }, 500, TimeUnit.MILLISECONDS); + }); // disable the clean up to avoid race condition during shutdown response = client().admin() diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java index ab2f0f0080566..f6c7355ea06f6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java +++
b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java @@ -26,13 +26,13 @@ import java.util.function.Function; import java.util.stream.Collectors; -import static org.opensearch.gateway.remote.RemoteClusterStateService.COORDINATION_METADATA; -import static org.opensearch.gateway.remote.RemoteClusterStateService.CUSTOM_METADATA; -import static org.opensearch.gateway.remote.RemoteClusterStateService.DELIMITER; -import static org.opensearch.gateway.remote.RemoteClusterStateService.METADATA_FILE_PREFIX; import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING; -import static org.opensearch.gateway.remote.RemoteClusterStateService.SETTING_METADATA; -import static org.opensearch.gateway.remote.RemoteClusterStateService.TEMPLATES_METADATA; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.DELIMITER; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.METADATA_FILE_PREFIX; +import static org.opensearch.gateway.remote.model.RemoteCoordinationMetadata.COORDINATION_METADATA; +import static org.opensearch.gateway.remote.model.RemoteCustomMetadata.CUSTOM_METADATA; +import static org.opensearch.gateway.remote.model.RemotePersistentSettingsMetadata.SETTING_METADATA; +import static org.opensearch.gateway.remote.model.RemoteTemplatesMetadata.TEMPLATES_METADATA; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteClusterStateServiceIT extends RemoteStoreBaseIntegTestCase { diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java index 0539f96e429c1..28bac3c7441b6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java @@ -13,6 +13,7 @@ import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest; import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.opensearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest; +import org.opensearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.opensearch.action.admin.indices.stats.CommonStatsFlags; import org.opensearch.action.search.SearchResponse; import org.opensearch.client.Client; @@ -23,12 +24,14 @@ import org.opensearch.common.cache.stats.ImmutableCacheStats; import org.opensearch.common.cache.stats.ImmutableCacheStatsHolder; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.IndexSettings; import org.opensearch.index.cache.request.RequestCacheStats; import org.opensearch.index.query.QueryBuilders; import org.opensearch.test.OpenSearchIntegTestCase; @@ -266,10 +269,14 @@ private void startIndex(Client client, String indexName) throws InterruptedExcep .put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true) .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) 
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + // Disable index refreshing to avoid cache being invalidated mid-test + .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), TimeValue.timeValueMillis(-1)) ) .get() ); indexRandom(true, client.prepareIndex(indexName).setSource("k", "hello")); + // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache + ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge(indexName).setFlush(true).get(); ensureSearchable(indexName); } diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java index 9888d2d8abd98..299652e4f07a9 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java @@ -631,7 +631,6 @@ public void testCacheWithFilteredAlias() throws InterruptedException { assertCacheState(client, index, 2, 2); } - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/11374") public void testProfileDisableCache() throws Exception { Client client = client(); String index = "index"; @@ -674,7 +673,6 @@ public void testProfileDisableCache() throws Exception { } } - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/12308") public void testCacheWithInvalidation() throws Exception { Client client = client(); String index = "index"; @@ -760,7 +758,6 @@ public void testCacheClearAPIRemovesStaleKeysWhenStalenessThresholdIsLow() throw } // when staleness threshold is lower than staleness, it should clean the stale keys from cache - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/13540") public void testStaleKeysCleanupWithLowThreshold() throws Exception { int cacheCleanIntervalInMillis = 1; String node = internalCluster().startNode( @@ -807,7 +804,6 @@ public void testStaleKeysCleanupWithLowThreshold() throws Exception { } // when staleness threshold is equal to staleness, it should clean the stale keys from cache - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/13503") public void testCacheCleanupOnEqualStalenessAndThreshold() throws Exception { int cacheCleanIntervalInMillis = 1; String node = internalCluster().startNode( @@ -986,7 +982,6 @@ public void testStaleKeysRemovalWithoutExplicitThreshold() throws Exception { } // when cache cleaner interval setting is not set, cache cleaner is configured appropriately with the fall-back setting - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/13711") public void testCacheCleanupWithDefaultSettings() throws Exception { int cacheCleanIntervalInMillis = 1; String node = internalCluster().startNode( @@ -1027,7 +1022,6 @@ public void testCacheCleanupWithDefaultSettings() throws Exception { } // staleness threshold updates flows through to the cache cleaner - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/13949") public void testDynamicStalenessThresholdUpdate() throws Exception { int cacheCleanIntervalInMillis = 1; String node = internalCluster().startNode( @@ -1175,7 +1169,6 @@ public void testCacheCleanupAfterIndexDeletion() throws Exception { } // when staleness threshold is lower than staleness, it should clean the cache from all indices having stale keys - @AwaitsFix(bugUrl = 
"https://github.com/opensearch-project/OpenSearch/issues/13437") public void testStaleKeysCleanupWithMultipleIndices() throws Exception { int cacheCleanIntervalInMillis = 10; String node = internalCluster().startNode( @@ -1230,7 +1223,6 @@ public void testStaleKeysCleanupWithMultipleIndices() throws Exception { }, cacheCleanIntervalInMillis * 2, TimeUnit.MILLISECONDS); } - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/13600") public void testDeleteAndCreateSameIndexShardOnSameNode() throws Exception { String node_1 = internalCluster().startNode(Settings.builder().build()); Client client = client(node_1); diff --git a/server/src/internalClusterTest/java/org/opensearch/ingest/IngestClientIT.java b/server/src/internalClusterTest/java/org/opensearch/ingest/IngestClientIT.java index 9481a6116cdbc..dbde31ef1eb65 100644 --- a/server/src/internalClusterTest/java/org/opensearch/ingest/IngestClientIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/ingest/IngestClientIT.java @@ -60,15 +60,18 @@ import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; +import org.hamcrest.MatcherAssert; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.Map; +import java.util.stream.Collectors; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.test.NodeRoles.nonIngestNode; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.notNullValue; @@ -159,6 +162,14 @@ public void testSimulate() throws Exception { } public void testBulkWithIngestFailures() throws Exception { + runBulkTestWithRandomDocs(false); + } + + public void testBulkWithIngestFailuresWithBatchSize() throws Exception { + runBulkTestWithRandomDocs(true); + } + + private void runBulkTestWithRandomDocs(boolean shouldSetBatchSize) throws Exception { createIndex("index"); BytesReference source = BytesReference.bytes( @@ -177,6 +188,9 @@ public void testBulkWithIngestFailures() throws Exception { int numRequests = scaledRandomIntBetween(32, 128); BulkRequest bulkRequest = new BulkRequest(); + if (shouldSetBatchSize) { + bulkRequest.batchSize(numRequests); + } for (int i = 0; i < numRequests; i++) { IndexRequest indexRequest = new IndexRequest("index").id(Integer.toString(i)).setPipeline("_id"); indexRequest.source(Requests.INDEX_CONTENT_TYPE, "field", "value", "fail", i % 2 == 0); @@ -209,6 +223,103 @@ public void testBulkWithIngestFailures() throws Exception { assertTrue(deletePipelineResponse.isAcknowledged()); } + public void testBulkWithIngestFailuresBatch() throws Exception { + createIndex("index"); + + BytesReference source = BytesReference.bytes( + jsonBuilder().startObject() + .field("description", "my_pipeline") + .startArray("processors") + .startObject() + .startObject("test") + .endObject() + .endObject() + .endArray() + .endObject() + ); + PutPipelineRequest putPipelineRequest = new PutPipelineRequest("_id", source, MediaTypeRegistry.JSON); + client().admin().cluster().putPipeline(putPipelineRequest).get(); + + BulkRequest bulkRequest = new BulkRequest(); + bulkRequest.batchSize(2); + bulkRequest.add( + new IndexRequest("index").id("_fail").setPipeline("_id").source(Requests.INDEX_CONTENT_TYPE, "field", "value", "fail", true) + ); + 
bulkRequest.add( + new IndexRequest("index").id("_success").setPipeline("_id").source(Requests.INDEX_CONTENT_TYPE, "field", "value", "fail", false) + ); + + BulkResponse response = client().bulk(bulkRequest).actionGet(); + MatcherAssert.assertThat(response.getItems().length, equalTo(bulkRequest.requests().size())); + + Map<String, BulkItemResponse> results = Arrays.stream(response.getItems()) + .collect(Collectors.toMap(BulkItemResponse::getId, r -> r)); + + MatcherAssert.assertThat(results.keySet(), containsInAnyOrder("_fail", "_success")); + assertNotNull(results.get("_fail").getFailure()); + assertNull(results.get("_success").getFailure()); + + // verify field of successful doc + Map<String, Object> successDoc = client().prepareGet("index", "_success").get().getSourceAsMap(); + assertThat(successDoc.get("processed"), equalTo(true)); + + // cleanup + AcknowledgedResponse deletePipelineResponse = client().admin().cluster().prepareDeletePipeline("_id").get(); + assertTrue(deletePipelineResponse.isAcknowledged()); + } + + public void testBulkWithIngestFailuresAndDropBatch() throws Exception { + createIndex("index"); + + BytesReference source = BytesReference.bytes( + jsonBuilder().startObject() + .field("description", "my_pipeline") + .startArray("processors") + .startObject() + .startObject("test") + .endObject() + .endObject() + .endArray() + .endObject() + ); + PutPipelineRequest putPipelineRequest = new PutPipelineRequest("_id", source, MediaTypeRegistry.JSON); + client().admin().cluster().putPipeline(putPipelineRequest).get(); + + BulkRequest bulkRequest = new BulkRequest(); + bulkRequest.batchSize(3); + bulkRequest.add( + new IndexRequest("index").id("_fail").setPipeline("_id").source(Requests.INDEX_CONTENT_TYPE, "field", "value", "fail", true) + ); + bulkRequest.add( + new IndexRequest("index").id("_success").setPipeline("_id").source(Requests.INDEX_CONTENT_TYPE, "field", "value", "fail", false) + ); + bulkRequest.add( + new IndexRequest("index").id("_drop").setPipeline("_id").source(Requests.INDEX_CONTENT_TYPE, "field", "value", "drop", true) + ); + + BulkResponse response = client().bulk(bulkRequest).actionGet(); + MatcherAssert.assertThat(response.getItems().length, equalTo(bulkRequest.requests().size())); + + Map<String, BulkItemResponse> results = Arrays.stream(response.getItems()) + .collect(Collectors.toMap(BulkItemResponse::getId, r -> r)); + + MatcherAssert.assertThat(results.keySet(), containsInAnyOrder("_fail", "_success", "_drop")); + assertNotNull(results.get("_fail").getFailure()); + assertNull(results.get("_success").getFailure()); + assertNull(results.get("_drop").getFailure()); + + // verify dropped doc not in index + assertNull(client().prepareGet("index", "_drop").get().getSourceAsMap()); + + // verify field of successful doc + Map<String, Object> successDoc = client().prepareGet("index", "_success").get().getSourceAsMap(); + assertThat(successDoc.get("processed"), equalTo(true)); + + // cleanup + AcknowledgedResponse deletePipelineResponse = client().admin().cluster().prepareDeletePipeline("_id").get(); + assertTrue(deletePipelineResponse.isAcknowledged()); + } + public void testBulkWithUpsert() throws Exception { createIndex("index"); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java index 901b36f872622..5be9b25512704 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java +++
b/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java @@ -9,6 +9,8 @@ package org.opensearch.remotemigration; import org.opensearch.action.DocWriteResponse; +import org.opensearch.action.admin.cluster.health.ClusterHealthRequest; +import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; import org.opensearch.action.bulk.BulkRequest; @@ -16,11 +18,15 @@ import org.opensearch.action.delete.DeleteResponse; import org.opensearch.action.index.IndexRequest; import org.opensearch.action.index.IndexResponse; +import org.opensearch.client.Requests; import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.routing.RoutingNode; +import org.opensearch.common.Priority; import org.opensearch.common.UUIDs; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; import org.opensearch.repositories.fs.ReloadableFsRepository; import org.opensearch.test.OpenSearchIntegTestCase; import org.junit.Before; @@ -39,6 +45,7 @@ import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; import static org.opensearch.repositories.fs.ReloadableFsRepository.REPOSITORIES_FAILRATE_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; public class MigrationBaseTestCase extends OpenSearchIntegTestCase { protected static final String REPOSITORY_NAME = "test-remote-store-repo"; @@ -114,6 +121,10 @@ public void initDocRepToRemoteMigration() { ); } + public ClusterHealthStatus ensureGreen(String... indices) { + return ensureGreen(TimeValue.timeValueSeconds(60), indices); + } + public BulkResponse indexBulk(String indexName, int numDocs) { BulkRequest bulkRequest = new BulkRequest(); for (int i = 0; i < numDocs; i++) { @@ -181,14 +192,12 @@ private Thread getIndexingThread() { long currentDocCount = indexedDocs.incrementAndGet(); if (currentDocCount > 0 && currentDocCount % refreshFrequency == 0) { if (rarely()) { - logger.info("--> [iteration {}] flushing index", currentDocCount); client().admin().indices().prepareFlush(indexName).get(); + logger.info("Completed ingestion of {} docs. 
Flushing now", currentDocCount); } else { - logger.info("--> [iteration {}] refreshing index", currentDocCount); client().admin().indices().prepareRefresh(indexName).get(); } } - logger.info("Completed ingestion of {} docs", currentDocCount); } }); } @@ -218,4 +227,38 @@ public void stopShardRebalancing() { .get() ); } + + public ClusterHealthStatus waitForRelocation() { + ClusterHealthRequest request = Requests.clusterHealthRequest() + .waitForNoRelocatingShards(true) + .timeout(TimeValue.timeValueSeconds(60)) + .waitForEvents(Priority.LANGUID); + ClusterHealthResponse actionGet = client().admin().cluster().health(request).actionGet(); + if (actionGet.isTimedOut()) { + logger.info( + "waitForRelocation timed out, cluster state:\n{}\n{}", + client().admin().cluster().prepareState().get().getState(), + client().admin().cluster().preparePendingClusterTasks().get() + ); + assertThat("timed out waiting for relocation", actionGet.isTimedOut(), equalTo(false)); + } + return actionGet.getStatus(); + } + + public ClusterHealthStatus waitForRelocation(TimeValue t) { + ClusterHealthRequest request = Requests.clusterHealthRequest() + .waitForNoRelocatingShards(true) + .timeout(t) + .waitForEvents(Priority.LANGUID); + ClusterHealthResponse actionGet = client().admin().cluster().health(request).actionGet(); + if (actionGet.isTimedOut()) { + logger.info( + "waitForRelocation timed out, cluster state:\n{}\n{}", + client().admin().cluster().prepareState().get().getState(), + client().admin().cluster().preparePendingClusterTasks().get() + ); + assertThat("timed out waiting for relocation", actionGet.isTimedOut(), equalTo(false)); + } + return actionGet.getStatus(); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemotePrimaryRelocationIT.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemotePrimaryRelocationIT.java index cea653c0ead4b..fa3b9368ded47 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemotePrimaryRelocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemotePrimaryRelocationIT.java @@ -99,16 +99,7 @@ public void testRemotePrimaryRelocation() throws Exception { .add(new MoveAllocationCommand("test", 0, primaryNodeName("test"), remoteNode)) .execute() .actionGet(); - ClusterHealthResponse clusterHealthResponse = client().admin() - .cluster() - .prepareHealth() - .setTimeout(TimeValue.timeValueSeconds(60)) - .setWaitForEvents(Priority.LANGUID) - .setWaitForNoRelocatingShards(true) - .execute() - .actionGet(); - - assertEquals(0, clusterHealthResponse.getRelocatingShards()); + waitForRelocation(); assertEquals(remoteNode, primaryNodeName("test")); logger.info("--> relocation from docrep to remote complete"); @@ -123,16 +114,7 @@ public void testRemotePrimaryRelocation() throws Exception { .add(new MoveAllocationCommand("test", 0, remoteNode, remoteNode2)) .execute() .actionGet(); - clusterHealthResponse = client().admin() - .cluster() - .prepareHealth() - .setTimeout(TimeValue.timeValueSeconds(60)) - .setWaitForEvents(Priority.LANGUID) - .setWaitForNoRelocatingShards(true) - .execute() - .actionGet(); - - assertEquals(0, clusterHealthResponse.getRelocatingShards()); + waitForRelocation(); assertEquals(remoteNode2, primaryNodeName("test")); logger.info("--> relocation from remote to remote complete"); @@ -155,7 +137,6 @@ public void testRemotePrimaryRelocation() throws Exception { public void testMixedModeRelocation_RemoteSeedingFail() throws Exception { String 
docRepNode = internalCluster().startNode(); - Client client = internalCluster().client(docRepNode); ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); updateSettingsRequest.persistentSettings(Settings.builder().put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed")); assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteReplicaRecoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteReplicaRecoveryIT.java index aae726fe2a6bc..d6e25c0cab3ac 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteReplicaRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteReplicaRecoveryIT.java @@ -8,15 +8,12 @@ package org.opensearch.remotemigration; -import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.opensearch.action.admin.indices.replication.SegmentReplicationStatsResponse; import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.routing.allocation.command.MoveAllocationCommand; -import org.opensearch.common.Priority; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.TimeValue; import org.opensearch.index.SegmentReplicationPerGroupStats; import org.opensearch.index.query.QueryBuilders; import org.opensearch.test.OpenSearchIntegTestCase; @@ -83,16 +80,8 @@ public void testReplicaRecovery() throws Exception { .add(new MoveAllocationCommand("test", 0, primaryNode, remoteNode)) .execute() .actionGet(); - ClusterHealthResponse clusterHealthResponse = client().admin() - .cluster() - .prepareHealth() - .setTimeout(TimeValue.timeValueSeconds(60)) - .setWaitForEvents(Priority.LANGUID) - .setWaitForNoRelocatingShards(true) - .execute() - .actionGet(); - assertEquals(0, clusterHealthResponse.getRelocatingShards()); + waitForRelocation(); logger.info("--> relocation of primary from docrep to remote complete"); logger.info("--> getting up the new replicas now to doc rep node as well as remote node "); @@ -109,17 +98,7 @@ public void testReplicaRecovery() throws Exception { ) .get(); - client().admin() - .cluster() - .prepareHealth() - .setTimeout(TimeValue.timeValueSeconds(60)) - .setWaitForEvents(Priority.LANGUID) - .setWaitForGreenStatus() - .execute() - .actionGet(); - logger.info("--> replica is up now on another docrep now as well as remote node"); - - assertEquals(0, clusterHealthResponse.getRelocatingShards()); + waitForRelocation(); asyncIndexingService.stopIndexing(); refresh("test"); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java index 4e4f6da56d622..e0e25db4ca722 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java @@ -8,13 +8,11 @@ package org.opensearch.remotemigration; -import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; import 
org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.opensearch.client.Client; import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.common.Priority; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.FeatureFlags; @@ -28,6 +26,7 @@ import java.util.List; import java.util.Map; +import static org.opensearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING; import static org.opensearch.node.remotestore.RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING; import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @@ -48,6 +47,10 @@ protected Settings featureFlagSettings() { return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.REMOTE_STORE_MIGRATION_EXPERIMENTAL, "true").build(); } + protected int maximumNumberOfShards() { + return 5; + } + public void testMixedModeAddRemoteNodes() throws Exception { internalCluster().setBootstrapClusterManagerNodeIndex(0); List cmNodes = internalCluster().startNodes(1); @@ -155,7 +158,11 @@ public void testEndToEndRemoteMigration() throws Exception { internalCluster().setBootstrapClusterManagerNodeIndex(0); List docRepNodes = internalCluster().startNodes(2); ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); - updateSettingsRequest.persistentSettings(Settings.builder().put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed")); + updateSettingsRequest.persistentSettings( + Settings.builder() + .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed") + .put(CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getKey(), maximumNumberOfShards()) + ); assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); client().admin().indices().prepareCreate("test").setSettings(indexSettings()).setMapping("field", "type=text").get(); ensureGreen("test"); @@ -189,16 +196,7 @@ public void testEndToEndRemoteMigration() throws Exception { ) .get() ); - - ClusterHealthResponse clusterHealthResponse = client().admin() - .cluster() - .prepareHealth() - .setTimeout(TimeValue.timeValueSeconds(45)) - .setWaitForEvents(Priority.LANGUID) - .setWaitForNoRelocatingShards(true) - .execute() - .actionGet(); - assertTrue(clusterHealthResponse.getRelocatingShards() == 0); + waitForRelocation(TimeValue.timeValueSeconds(90)); logger.info("---> Stopping indexing thread"); asyncIndexingService.stopIndexing(); Map shardCountByNodeId = getShardCountByNodeId(); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java index b22817ef19d1b..11260e0914dc5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java @@ -57,6 +57,7 @@ import static org.opensearch.cluster.metadata.Metadata.CLUSTER_READ_ONLY_BLOCK; import static org.opensearch.cluster.metadata.Metadata.SETTING_READ_ONLY_SETTING; import static 
org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.encodeString; import static org.opensearch.indices.ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE; import static org.opensearch.repositories.blobstore.BlobStoreRepository.SYSTEM_REPOSITORY_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @@ -326,9 +327,7 @@ public void testFullClusterRestoreManifestFilePointsToInvalidIndexMetadataPathTh // Step - 3 Delete index metadata file in remote try { Files.move( - segmentRepoPath.resolve( - RemoteClusterStateService.encodeString(clusterName) + "/cluster-state/" + prevClusterUUID + "/index" - ), + segmentRepoPath.resolve(encodeString(clusterName) + "/cluster-state/" + prevClusterUUID + "/index"), segmentRepoPath.resolve("cluster-state/") ); } catch (IOException e) { @@ -354,10 +353,7 @@ public void testRemoteStateFullRestart() throws Exception { try { Files.move( segmentRepoPath.resolve( - RemoteClusterStateService.encodeString(clusterService().state().getClusterName().value()) - + "/cluster-state/" - + prevClusterUUID - + "/manifest" + encodeString(clusterService().state().getClusterName().value()) + "/cluster-state/" + prevClusterUUID + "/manifest" ), segmentRepoPath.resolve("cluster-state/") ); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/AggregationsIntegrationIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/AggregationsIntegrationIT.java index 4a8b00ea45738..4051bee3e4e5c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/AggregationsIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/AggregationsIntegrationIT.java @@ -187,4 +187,27 @@ public void testAggsOnEmptyShards() { // Validate non-global agg does not throw an exception assertSearchResponse(client().prepareSearch("idx").addAggregation(stats("value_stats").field("score")).get()); } + + public void testAggsWithTerminateAfter() throws InterruptedException { + assertAcked( + prepareCreate( + "terminate_index", + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + ).setMapping("f", "type=keyword").get() + ); + List docs = new ArrayList<>(); + for (int i = 0; i < randomIntBetween(5, 20); ++i) { + docs.add(client().prepareIndex("terminate_index").setSource("f", Integer.toString(i / 3))); + } + indexRandom(true, docs); + + SearchResponse response = client().prepareSearch("terminate_index") + .setSize(2) + .setTerminateAfter(1) + .addAggregation(terms("f").field("f")) + .get(); + assertSearchResponse(response); + assertTrue(response.isTerminatedEarly()); + assertEquals(response.getHits().getHits().length, 1); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityIT.java index db4ee3571d141..b2ed689622e7d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityIT.java @@ -34,6 +34,7 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; import 
org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; @@ -59,6 +60,7 @@ import static java.util.Collections.emptyMap; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.search.SearchService.CARDINALITY_AGGREGATION_PRUNING_THRESHOLD; import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.cardinality; import static org.opensearch.search.aggregations.AggregationBuilders.global; @@ -255,6 +257,36 @@ public void testSingleValuedString() throws Exception { assertCount(count, numDocs); } + public void testDisableDynamicPruning() throws Exception { + SearchResponse response = client().prepareSearch("idx") + .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field("str_value")) + .get(); + assertSearchResponse(response); + + Cardinality count1 = response.getAggregations().get("cardinality"); + + final ClusterUpdateSettingsResponse updateSettingResponse = client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(CARDINALITY_AGGREGATION_PRUNING_THRESHOLD.getKey(), 0)) + .get(); + assertEquals(updateSettingResponse.getTransientSettings().get(CARDINALITY_AGGREGATION_PRUNING_THRESHOLD.getKey()), "0"); + + response = client().prepareSearch("idx") + .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field("str_value")) + .get(); + assertSearchResponse(response); + Cardinality count2 = response.getAggregations().get("cardinality"); + + assertEquals(count1, count2); + + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().putNull(CARDINALITY_AGGREGATION_PRUNING_THRESHOLD.getKey())) + .get(); + } + public void testSingleValuedNumeric() throws Exception { SearchResponse response = client().prepareSearch("idx") .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field(singleNumericField())) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java index a58db51780826..01ad06757640c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java @@ -1914,14 +1914,8 @@ public void testRangeQueryWithTimeZone() throws Exception { * Test range with a custom locale, e.g. "de" in this case. Documents here mention the day of week * as "Mi" for "Mittwoch (Wednesday)" and "Do" for "Donnerstag (Thursday)" and the month in the query * as "Dez" for "Dezember (December)". - * Note: this test currently needs the JVM arg `-Djava.locale.providers=SPI,COMPAT` to be set. - * When running with gradle this is done implicitly through the BuildPlugin, but when running from - * an IDE this might need to be set manually in the run configuration. See also CONTRIBUTING.md section - * on "Configuring IDEs And Running Tests".
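The note removed above is obsolete now that the locale provider has moved from COMPAT to CLDR: CLDR abbreviates German day and month names with trailing periods, which is exactly why the date literals in this test change below. A standalone sketch of the difference (class name hypothetical, not part of the PR):

import java.time.LocalDate;
import java.time.format.DateTimeFormatter;
import java.util.Locale;

public class CldrDateDemo {
    public static void main(String[] args) {
        // CLDR (the JDK default locale data since Java 9) uses "Mi." and "Dez.";
        // the legacy COMPAT provider produced "Mi" and "Dez" without periods.
        DateTimeFormatter fmt = DateTimeFormatter.ofPattern("EEE, dd MMM yyyy", Locale.GERMAN);
        // December 6, 2000 was a Wednesday; prints "Mi., 06 Dez. 2000" under CLDR.
        System.out.println(fmt.format(LocalDate.of(2000, 12, 6)));
    }
}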
*/ public void testRangeQueryWithLocaleMapping() throws Exception { - assert ("SPI,COMPAT".equals(System.getProperty("java.locale.providers"))) : "`-Djava.locale.providers=SPI,COMPAT` needs to be set"; - assertAcked( prepareCreate("test").setMapping( jsonBuilder().startObject() @@ -1938,17 +1932,21 @@ public void testRangeQueryWithLocaleMapping() throws Exception { indexRandom( true, - client().prepareIndex("test").setId("1").setSource("date_field", "Mi, 06 Dez 2000 02:55:00 -0800"), - client().prepareIndex("test").setId("2").setSource("date_field", "Do, 07 Dez 2000 02:55:00 -0800") + client().prepareIndex("test").setId("1").setSource("date_field", "Mi., 06 Dez. 2000 02:55:00 -0800"), + client().prepareIndex("test").setId("2").setSource("date_field", "Do., 07 Dez. 2000 02:55:00 -0800") ); SearchResponse searchResponse = client().prepareSearch("test") - .setQuery(QueryBuilders.rangeQuery("date_field").gte("Di, 05 Dez 2000 02:55:00 -0800").lte("Do, 07 Dez 2000 00:00:00 -0800")) + .setQuery( + QueryBuilders.rangeQuery("date_field").gte("Di., 05 Dez. 2000 02:55:00 -0800").lte("Do., 07 Dez. 2000 00:00:00 -0800") + ) .get(); assertHitCount(searchResponse, 1L); searchResponse = client().prepareSearch("test") - .setQuery(QueryBuilders.rangeQuery("date_field").gte("Di, 05 Dez 2000 02:55:00 -0800").lte("Fr, 08 Dez 2000 00:00:00 -0800")) + .setQuery( + QueryBuilders.rangeQuery("date_field").gte("Di., 05 Dez. 2000 02:55:00 -0800").lte("Fr., 08 Dez. 2000 00:00:00 -0800") + ) .get(); assertHitCount(searchResponse, 2L); } diff --git a/server/src/main/java/org/opensearch/cluster/RepositoryCleanupInProgress.java b/server/src/main/java/org/opensearch/cluster/RepositoryCleanupInProgress.java index 72a3519aca6f8..4c76858107ed8 100644 --- a/server/src/main/java/org/opensearch/cluster/RepositoryCleanupInProgress.java +++ b/server/src/main/java/org/opensearch/cluster/RepositoryCleanupInProgress.java @@ -45,6 +45,7 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; +import java.util.Objects; /** * Information passed during repository cleanup @@ -118,6 +119,24 @@ public Version getMinimalSupportedVersion() { return LegacyESVersion.fromId(7040099); } + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + RepositoryCleanupInProgress that = (RepositoryCleanupInProgress) o; + return entries.equals(that.entries); + } + + @Override + public int hashCode() { + return 31 + entries.hashCode(); + } + /** * Entry in the collection. 
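The equals and hashCode overrides added here (and on Entry just below) give the cleanup custom value semantics, so two cluster states carrying identical cleanup entries compare as equal during remote-state checks. A minimal sketch of the contract, assuming the class's existing startedEntry(String, long) factory and list constructor:

RepositoryCleanupInProgress a = new RepositoryCleanupInProgress(List.of(RepositoryCleanupInProgress.startedEntry("my-repo", 42L)));
RepositoryCleanupInProgress b = new RepositoryCleanupInProgress(List.of(RepositoryCleanupInProgress.startedEntry("my-repo", 42L)));
// Same repository and repositoryStateId imply equal customs with matching hash codes.
assert a.equals(b) && a.hashCode() == b.hashCode();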
* @@ -155,6 +174,23 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(repositoryStateId); } + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + RepositoryCleanupInProgress.Entry that = (RepositoryCleanupInProgress.Entry) o; + return repository.equals(that.repository) && repositoryStateId == that.repositoryStateId; + } + + @Override + public int hashCode() { + return Objects.hash(repository, repositoryStateId); + } + @Override public String toString() { return "{" + repository + '}' + '{' + repositoryStateId + '}'; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java index 232f900f25375..a0ef8de07fbf2 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java @@ -981,6 +981,10 @@ public static boolean isTemplatesMetadataEqual(Metadata metadata1, Metadata meta return metadata1.templates.equals(metadata2.templates); } + public static boolean isHashesOfConsistentSettingsEqual(Metadata metadata1, Metadata metadata2) { + return metadata1.hashesOfConsistentSettings.equals(metadata2.hashesOfConsistentSettings); + } + public static boolean isCustomMetadataEqual(Metadata metadata1, Metadata metadata2) { int customCount1 = 0; for (Map.Entry cursor : metadata1.customs.entrySet()) { diff --git a/server/src/main/java/org/opensearch/cluster/routing/RoutingTable.java b/server/src/main/java/org/opensearch/cluster/routing/RoutingTable.java index e4095a84be081..6c7b94f316da2 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/RoutingTable.java +++ b/server/src/main/java/org/opensearch/cluster/routing/RoutingTable.java @@ -79,7 +79,7 @@ public class RoutingTable implements Iterable, Diffable indicesRouting; - private RoutingTable(long version, final Map indicesRouting) { + public RoutingTable(long version, final Map indicesRouting) { this.version = version; this.indicesRouting = Collections.unmodifiableMap(indicesRouting); } diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationService.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationService.java index 3864e282a310b..5ad3a2fd47ce3 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationService.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationService.java @@ -584,10 +584,7 @@ private void allocateExistingUnassignedShards(RoutingAllocation allocation) { /* Use batch mode if enabled and there is no custom allocator set for Allocation service */ - Boolean batchModeEnabled = EXISTING_SHARDS_ALLOCATOR_BATCH_MODE.get(settings); - if (batchModeEnabled - && allocation.nodes().getMinNodeVersion().onOrAfter(Version.V_2_14_0) - && existingShardsAllocators.size() == 2) { + if (isBatchModeEnabled(allocation)) { /* If we do not have any custom allocator set then we will be using ShardsBatchGatewayAllocator Currently AllocationService will not run any custom Allocator that implements allocateAllUnassignedShards @@ -724,13 +721,24 @@ private AllocateUnassignedDecision explainUnassignedShardAllocation(ShardRouting private ExistingShardsAllocator getAllocatorForShard(ShardRouting shardRouting, RoutingAllocation routingAllocation) { assert assertInitialized(); - final String allocatorName = 
ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_SETTING.get( - routingAllocation.metadata().getIndexSafe(shardRouting.index()).getSettings() - ); + String allocatorName; + if (isBatchModeEnabled(routingAllocation)) { + allocatorName = ShardsBatchGatewayAllocator.ALLOCATOR_NAME; + } else { + allocatorName = ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_SETTING.get( + routingAllocation.metadata().getIndexSafe(shardRouting.index()).getSettings() + ); + } final ExistingShardsAllocator existingShardsAllocator = existingShardsAllocators.get(allocatorName); return existingShardsAllocator != null ? existingShardsAllocator : new NotFoundAllocator(allocatorName); } + private boolean isBatchModeEnabled(RoutingAllocation routingAllocation) { + return EXISTING_SHARDS_ALLOCATOR_BATCH_MODE.get(settings) + && routingAllocation.nodes().getMinNodeVersion().onOrAfter(Version.V_2_14_0) + && existingShardsAllocators.size() == 2; + } + private boolean assertInitialized() { assert existingShardsAllocators != null : "must have set allocators first"; return true; diff --git a/server/src/main/java/org/opensearch/cluster/routing/remote/InternalRemoteRoutingTableService.java b/server/src/main/java/org/opensearch/cluster/routing/remote/InternalRemoteRoutingTableService.java index dcf106914c571..cc1b0713393f3 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/remote/InternalRemoteRoutingTableService.java +++ b/server/src/main/java/org/opensearch/cluster/routing/remote/InternalRemoteRoutingTableService.java @@ -10,6 +10,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.store.IndexInput; import org.opensearch.action.LatchedActionListener; import org.opensearch.cluster.ClusterState; @@ -32,8 +33,9 @@ import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.index.Index; import org.opensearch.gateway.remote.ClusterMetadataManifest; -import org.opensearch.gateway.remote.RemoteClusterStateService; +import org.opensearch.gateway.remote.RemoteStateTransferException; import org.opensearch.gateway.remote.routingtable.RemoteIndexRoutingTable; import org.opensearch.index.remote.RemoteStoreEnums; import org.opensearch.index.remote.RemoteStorePathStrategy; @@ -43,15 +45,19 @@ import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.threadpool.ThreadPool; import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Map; +import java.util.Optional; +import java.util.concurrent.ExecutorService; import java.util.function.Function; import java.util.function.Supplier; import java.util.stream.Collectors; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.DELIMITER; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteRoutingTableEnabled; /** @@ -87,7 +93,6 @@ public class InternalRemoteRoutingTableService extends AbstractLifecycleComponen public static final String INDEX_ROUTING_PATH_TOKEN = "index-routing"; public static final String INDEX_ROUTING_FILE_PREFIX = "index_routing"; - public static final String DELIMITER = "__"; public static final String INDEX_ROUTING_METADATA_PREFIX = "indexRouting--"; private static final Logger logger = 
LogManager.getLogger(InternalRemoteRoutingTableService.class); @@ -96,11 +101,13 @@ public class InternalRemoteRoutingTableService extends AbstractLifecycleComponen private BlobStoreRepository blobStoreRepository; private RemoteStoreEnums.PathType pathType; private RemoteStoreEnums.PathHashAlgorithm pathHashAlgo; + private ThreadPool threadPool; public InternalRemoteRoutingTableService( Supplier repositoriesService, Settings settings, - ClusterSettings clusterSettings + ClusterSettings clusterSettings, + ThreadPool threadpool ) { assert isRemoteRoutingTableEnabled(settings) : "Remote routing table is not enabled"; this.repositoriesService = repositoriesService; @@ -109,6 +116,7 @@ public InternalRemoteRoutingTableService( this.pathHashAlgo = clusterSettings.get(REMOTE_ROUTING_TABLE_PATH_HASH_ALGO_SETTING); clusterSettings.addSettingsUpdateConsumer(REMOTE_ROUTING_TABLE_PATH_TYPE_SETTING, this::setPathTypeSetting); clusterSettings.addSettingsUpdateConsumer(REMOTE_ROUTING_TABLE_PATH_HASH_ALGO_SETTING, this::setPathHashAlgoSetting); + this.threadPool = threadpool; } private void setPathTypeSetting(RemoteStoreEnums.PathType pathType) { @@ -175,10 +183,7 @@ public CheckedRunnable getIndexRoutingAsyncAction( ) ), ex -> latchedActionListener.onFailure( - new RemoteClusterStateService.RemoteStateTransferException( - "Exception in writing index to remote store: " + indexRouting.getIndex().toString(), - ex - ) + new RemoteStateTransferException("Exception in writing index to remote store: " + indexRouting.getIndex().toString(), ex) ) ); @@ -268,6 +273,68 @@ private void uploadIndex( } } + @Override + public CheckedRunnable getAsyncIndexRoutingReadAction( + String uploadedFilename, + Index index, + LatchedActionListener latchedActionListener + ) { + int idx = uploadedFilename.lastIndexOf("/"); + String blobFileName = uploadedFilename.substring(idx + 1); + BlobContainer blobContainer = blobStoreRepository.blobStore() + .blobContainer(BlobPath.cleanPath().add(uploadedFilename.substring(0, idx))); + + return () -> readAsync( + blobContainer, + blobFileName, + index, + threadPool.executor(ThreadPool.Names.REMOTE_STATE_READ), + ActionListener.wrap( + response -> latchedActionListener.onResponse(response.getIndexRoutingTable()), + latchedActionListener::onFailure + ) + ); + } + + private void readAsync( + BlobContainer blobContainer, + String name, + Index index, + ExecutorService executorService, + ActionListener listener + ) { + executorService.execute(() -> { + try { + listener.onResponse(read(blobContainer, name, index)); + } catch (Exception e) { + listener.onFailure(e); + } + }); + } + + private RemoteIndexRoutingTable read(BlobContainer blobContainer, String path, Index index) { + try { + return new RemoteIndexRoutingTable(blobContainer.readBlob(path), index); + } catch (IOException | AssertionError e) { + logger.error(() -> new ParameterizedMessage("RoutingTable read failed for path {}", path), e); + throw new RemoteStateTransferException("Failed to read RemoteRoutingTable from Manifest with error ", e); + } + } + + @Override + public List getUpdatedIndexRoutingTableMetadata( + List updatedIndicesRouting, + List allIndicesRouting + ) { + return updatedIndicesRouting.stream().map(idx -> { + Optional uploadedIndexMetadataOptional = allIndicesRouting.stream() + .filter(idx2 -> idx2.getIndexName().equals(idx)) + .findFirst(); + assert uploadedIndexMetadataOptional.isPresent() == true; + return uploadedIndexMetadataOptional.get(); + }).collect(Collectors.toList()); + } + private String 
getIndexRoutingFileName(long term, long version) { return String.join( DELIMITER, @@ -300,4 +367,15 @@ protected void doStart() { @Override protected void doStop() {} + @Override + public void deleteStaleIndexRoutingPaths(List stalePaths) throws IOException { + try { + logger.debug(() -> "Deleting stale index routing files from remote - " + stalePaths); + blobStoreRepository.blobStore().blobContainer(BlobPath.cleanPath()).deleteBlobsIgnoringIfNotExists(stalePaths); + } catch (IOException e) { + logger.error(() -> new ParameterizedMessage("Failed to delete some stale index routing paths from {}", stalePaths), e); + throw e; + } + } + } diff --git a/server/src/main/java/org/opensearch/cluster/routing/remote/NoopRemoteRoutingTableService.java b/server/src/main/java/org/opensearch/cluster/routing/remote/NoopRemoteRoutingTableService.java index b52c00f1f8576..6236d107d0220 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/remote/NoopRemoteRoutingTableService.java +++ b/server/src/main/java/org/opensearch/cluster/routing/remote/NoopRemoteRoutingTableService.java @@ -16,6 +16,7 @@ import org.opensearch.common.CheckedRunnable; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.lifecycle.AbstractLifecycleComponent; +import org.opensearch.core.index.Index; import org.opensearch.gateway.remote.ClusterMetadataManifest; import java.io.IOException; @@ -57,6 +58,26 @@ public List getAllUploadedIndices List indicesRoutingUploaded, List indicesRoutingToDelete ) { + // noop + return List.of(); + } + + @Override + public CheckedRunnable getAsyncIndexRoutingReadAction( + String uploadedFilename, + Index index, + LatchedActionListener latchedActionListener + ) { + // noop + return () -> {}; + } + + @Override + public List getUpdatedIndexRoutingTableMetadata( + List updatedIndicesRouting, + List allIndicesRouting + ) { + // noop return List.of(); } @@ -74,4 +95,9 @@ protected void doStop() { protected void doClose() throws IOException { // noop } + + @Override + public void deleteStaleIndexRoutingPaths(List stalePaths) throws IOException { + // noop + } } diff --git a/server/src/main/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableService.java b/server/src/main/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableService.java index c8ba91fab6871..d455dfb58eabc 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableService.java +++ b/server/src/main/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableService.java @@ -18,6 +18,7 @@ import org.opensearch.common.lifecycle.LifecycleComponent; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.index.Index; import org.opensearch.gateway.remote.ClusterMetadataManifest; import java.io.IOException; @@ -25,7 +26,9 @@ import java.util.Map; /** - * Interface for RemoteRoutingTableService. Exposes methods to orchestrate upload and download of routing table from remote store. + * A Service which provides APIs to upload and download routing table from remote store. 
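Alongside upload, the interface now exposes a download path. A sketch of how a caller might drive the async read using the project's LatchedActionListener pattern (the service handle, uploadedFilename, and index variables are placeholders, and exception handling is elided):

CountDownLatch latch = new CountDownLatch(1);
LatchedActionListener<IndexRoutingTable> listener = new LatchedActionListener<>(
    ActionListener.wrap(
        routing -> logger.info("read routing table for index [{}]", routing.getIndex()),
        e -> logger.error("index routing table read failed", e)
    ),
    latch
);
// The returned CheckedRunnable schedules the blob read on the REMOTE_STATE_READ threadpool.
remoteRoutingTableService.getAsyncIndexRoutingReadAction(uploadedFilename, index, listener).run();
latch.await(30, TimeUnit.SECONDS);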
+ * + * @opensearch.internal */ public interface RemoteRoutingTableService extends LifecycleComponent { public static final DiffableUtils.NonDiffableValueSerializer CUSTOM_ROUTING_TABLE_VALUE_SERIALIZER = @@ -43,6 +46,17 @@ public IndexRoutingTable read(StreamInput in, String key) throws IOException { List getIndicesRouting(RoutingTable routingTable); + CheckedRunnable getAsyncIndexRoutingReadAction( + String uploadedFilename, + Index index, + LatchedActionListener latchedActionListener + ); + + List getUpdatedIndexRoutingTableMetadata( + List updatedIndicesRouting, + List allIndicesRouting + ); + DiffableUtils.MapDiff> getIndicesRoutingMapDiff( RoutingTable before, RoutingTable after @@ -61,4 +75,6 @@ List getAllUploadedIndicesRouting List indicesRoutingToDelete ); + public void deleteStaleIndexRoutingPaths(List stalePaths) throws IOException; + } diff --git a/server/src/main/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceFactory.java b/server/src/main/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceFactory.java index 49f90fa261f27..82837191a30b7 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceFactory.java +++ b/server/src/main/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceFactory.java @@ -11,6 +11,7 @@ import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.repositories.RepositoriesService; +import org.opensearch.threadpool.ThreadPool; import java.util.function.Supplier; @@ -26,15 +27,17 @@ public class RemoteRoutingTableServiceFactory { * @param repositoriesService repositoriesService * @param settings settings * @param clusterSettings clusterSettings + * @param threadPool threadPool * @return RemoteRoutingTableService */ public static RemoteRoutingTableService getService( Supplier repositoriesService, Settings settings, - ClusterSettings clusterSettings + ClusterSettings clusterSettings, + ThreadPool threadPool ) { if (isRemoteRoutingTableEnabled(settings)) { - return new InternalRemoteRoutingTableService(repositoriesService, settings, clusterSettings); + return new InternalRemoteRoutingTableService(repositoriesService, settings, clusterSettings, threadPool); } return new NoopRemoteRoutingTableService(); } diff --git a/server/src/main/java/org/opensearch/common/cache/stats/ImmutableCacheStats.java b/server/src/main/java/org/opensearch/common/cache/stats/ImmutableCacheStats.java index dbd78a2584f9c..db23e7b877596 100644 --- a/server/src/main/java/org/opensearch/common/cache/stats/ImmutableCacheStats.java +++ b/server/src/main/java/org/opensearch/common/cache/stats/ImmutableCacheStats.java @@ -115,6 +115,29 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } + @Override + public String toString() { + return Fields.HIT_COUNT + + "=" + + hits + + ", " + + Fields.MISS_COUNT + + "=" + + misses + + ", " + + Fields.EVICTIONS + + "=" + + evictions + + ", " + + Fields.SIZE_IN_BYTES + + "=" + + sizeInBytes + + ", " + + Fields.ITEM_COUNT + + "=" + + items; + } + /** * Field names used to write the values in this object to XContent. 
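Because the new toString above renders the same field names the XContent writer uses, a logged ImmutableCacheStats reads like its serialized form. Illustratively, assuming the constructor order (hits, misses, evictions, sizeInBytes, items) and the usual snake_case values for the Fields constants:

// new ImmutableCacheStats(10, 2, 1, 2048, 9).toString() would produce:
// hit_count=10, miss_count=2, evictions=1, size_in_bytes=2048, item_count=9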
*/ diff --git a/server/src/main/java/org/opensearch/common/hash/MessageDigests.java b/server/src/main/java/org/opensearch/common/hash/MessageDigests.java index f53f60a3a97a3..123bd3489bedb 100644 --- a/server/src/main/java/org/opensearch/common/hash/MessageDigests.java +++ b/server/src/main/java/org/opensearch/common/hash/MessageDigests.java @@ -58,6 +58,7 @@ private static ThreadLocal createThreadLocalMessageDigest(String private static final ThreadLocal MD5_DIGEST = createThreadLocalMessageDigest("MD5"); private static final ThreadLocal SHA_1_DIGEST = createThreadLocalMessageDigest("SHA-1"); private static final ThreadLocal SHA_256_DIGEST = createThreadLocalMessageDigest("SHA-256"); + private static final ThreadLocal SHA3_256_DIGEST = createThreadLocalMessageDigest("SHA3-256"); /** * Returns a {@link MessageDigest} instance for MD5 digests; note @@ -95,6 +96,18 @@ public static MessageDigest sha256() { return get(SHA_256_DIGEST); } + /** + * Returns a {@link MessageDigest} instance for SHA3-256 digests; + * note that the instance returned is thread local and must not be + * shared amongst threads. + * + * @return a thread local {@link MessageDigest} instance that + * provides SHA3-256 message digest functionality. + */ + public static MessageDigest sha3256() { + return get(SHA3_256_DIGEST); + } + private static MessageDigest get(ThreadLocal messageDigest) { MessageDigest instance = messageDigest.get(); instance.reset(); diff --git a/server/src/main/java/org/opensearch/common/remote/AbstractRemoteWritableBlobEntity.java b/server/src/main/java/org/opensearch/common/remote/AbstractRemoteWritableBlobEntity.java index 632b2b70d61df..23fc9d3ad77cb 100644 --- a/server/src/main/java/org/opensearch/common/remote/AbstractRemoteWritableBlobEntity.java +++ b/server/src/main/java/org/opensearch/common/remote/AbstractRemoteWritableBlobEntity.java @@ -42,6 +42,8 @@ public AbstractRemoteWritableBlobEntity( public abstract BlobPathParameters getBlobPathParameters(); + public abstract String getType(); + public String getFullBlobName() { return blobName; } diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 335615a6affb7..7ea04acf00415 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -181,6 +181,10 @@ import java.util.Set; import java.util.function.Predicate; +import static org.opensearch.gateway.remote.RemoteGlobalMetadataManager.GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING; +import static org.opensearch.gateway.remote.RemoteIndexMetadataManager.INDEX_METADATA_UPLOAD_TIMEOUT_SETTING; +import static org.opensearch.gateway.remote.RemoteManifestManager.METADATA_MANIFEST_UPLOAD_TIMEOUT_SETTING; + /** * Encapsulates all valid cluster level settings. 
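On the MessageDigests addition above: SHA3-256 ships with the JDK since Java 9, so no extra security provider is needed, and sha3256() returns a thread-local instance like its MD5, SHA-1, and SHA-256 siblings. A short usage fragment with the class's existing toHexString helper:

MessageDigest sha3 = MessageDigests.sha3256();
byte[] hash = sha3.digest("hello".getBytes(StandardCharsets.UTF_8));
// The instance is thread-local: reuse it freely on this thread, never share it across threads.
String hex = MessageDigests.toHexString(hash);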
* @@ -536,6 +540,7 @@ public void apply(Settings value, Settings current, Settings previous) { SearchService.MAX_OPEN_PIT_CONTEXT, SearchService.MAX_PIT_KEEPALIVE_SETTING, SearchService.MAX_AGGREGATION_REWRITE_FILTERS, + SearchService.CARDINALITY_AGGREGATION_PRUNING_THRESHOLD, CreatePitController.PIT_INIT_KEEP_ALIVE, Node.WRITE_PORTS_FILE_SETTING, Node.NODE_NAME_SETTING, @@ -717,9 +722,10 @@ public void apply(Settings value, Settings current, Settings previous) { // Remote cluster state settings RemoteClusterStateCleanupManager.REMOTE_CLUSTER_STATE_CLEANUP_INTERVAL_SETTING, RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING, - RemoteClusterStateService.INDEX_METADATA_UPLOAD_TIMEOUT_SETTING, - RemoteClusterStateService.GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING, - RemoteClusterStateService.METADATA_MANIFEST_UPLOAD_TIMEOUT_SETTING, + INDEX_METADATA_UPLOAD_TIMEOUT_SETTING, + GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING, + METADATA_MANIFEST_UPLOAD_TIMEOUT_SETTING, + RemoteClusterStateService.REMOTE_STATE_READ_TIMEOUT_SETTING, RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING, RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING, IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING, diff --git a/server/src/main/java/org/opensearch/gateway/BaseGatewayShardAllocator.java b/server/src/main/java/org/opensearch/gateway/BaseGatewayShardAllocator.java index eed5de65258fc..58982e869794f 100644 --- a/server/src/main/java/org/opensearch/gateway/BaseGatewayShardAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/BaseGatewayShardAllocator.java @@ -36,7 +36,6 @@ import org.apache.logging.log4j.Logger; import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.RoutingNode; -import org.opensearch.cluster.routing.RoutingNodes; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.allocation.AllocateUnassignedDecision; import org.opensearch.cluster.routing.allocation.AllocationDecision; @@ -46,9 +45,7 @@ import org.opensearch.cluster.routing.allocation.decider.Decision; import java.util.ArrayList; -import java.util.HashMap; import java.util.List; -import java.util.stream.Collectors; /** * An abstract class that implements basic functionality for allocating @@ -81,38 +78,7 @@ public void allocateUnassigned( executeDecision(shardRouting, allocateUnassignedDecision, allocation, unassignedAllocationHandler); } - /** - * Allocate Batch of unassigned shard to nodes where valid copies of the shard already exists - * @param shardRoutings the shards to allocate - * @param allocation the allocation state container object - */ - public void allocateUnassignedBatch(List shardRoutings, RoutingAllocation allocation) { - // make Allocation Decisions for all shards - HashMap decisionMap = makeAllocationDecision(shardRoutings, allocation, logger); - assert shardRoutings.size() == decisionMap.size() : "make allocation decision didn't return allocation decision for " - + "some shards"; - // get all unassigned shards iterator - RoutingNodes.UnassignedShards.UnassignedIterator iterator = allocation.routingNodes().unassigned().iterator(); - - while (iterator.hasNext()) { - ShardRouting shard = iterator.next(); - try { - if (decisionMap.isEmpty() == false) { - if (decisionMap.containsKey(shard)) { - executeDecision(shard, decisionMap.remove(shard), allocation, iterator); - } - } else { - // no need to keep iterating the unassigned shards, if we don't have anything in decision map - break; - } - } catch (Exception e) { - 
logger.error("Failed to execute decision for shard {} while initializing {}", shard, e); - throw e; - } - } - } - - private void executeDecision( + protected void executeDecision( ShardRouting shardRouting, AllocateUnassignedDecision allocateUnassignedDecision, RoutingAllocation allocation, @@ -135,8 +101,6 @@ private void executeDecision( } } - public void allocateUnassignedBatch(String batchId, RoutingAllocation allocation) {} - protected long getExpectedShardSize(ShardRouting shardRouting, RoutingAllocation allocation) { if (shardRouting.primary()) { if (shardRouting.recoverySource().getType() == RecoverySource.Type.SNAPSHOT) { @@ -165,21 +129,6 @@ public abstract AllocateUnassignedDecision makeAllocationDecision( Logger logger ); - public HashMap makeAllocationDecision( - List unassignedShardBatch, - RoutingAllocation allocation, - Logger logger - ) { - - return (HashMap) unassignedShardBatch.stream() - .collect( - Collectors.toMap( - unassignedShard -> unassignedShard, - unassignedShard -> makeAllocationDecision(unassignedShard, allocation, logger) - ) - ); - } - /** * Builds decisions for all nodes in the cluster, so that the explain API can provide information on * allocation decisions for each node, while still waiting to allocate the shard (e.g. due to fetching shard data). diff --git a/server/src/main/java/org/opensearch/gateway/PrimaryShardBatchAllocator.java b/server/src/main/java/org/opensearch/gateway/PrimaryShardBatchAllocator.java index 1979f33484d49..27f9bedc4e495 100644 --- a/server/src/main/java/org/opensearch/gateway/PrimaryShardBatchAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/PrimaryShardBatchAllocator.java @@ -14,6 +14,7 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.allocation.AllocateUnassignedDecision; import org.opensearch.cluster.routing.allocation.RoutingAllocation; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.gateway.AsyncShardFetch.FetchResult; import org.opensearch.gateway.TransportNodesGatewayStartedShardHelper.GatewayStartedShard; import org.opensearch.gateway.TransportNodesGatewayStartedShardHelper.NodeGatewayStartedShard; @@ -61,50 +62,59 @@ protected FetchResult shardsState = fetchData( + List.of(unassignedShard), + Collections.emptyList(), + allocation + ); + List nodeGatewayStartedShards = adaptToNodeShardStates(unassignedShard, shardsState); + return getAllocationDecision(unassignedShard, allocation, nodeGatewayStartedShards, logger); } /** - * Build allocation decisions for all the shards present in the batch identified by batchId. 
+ * Allocate Batch of unassigned shard to nodes where valid copies of the shard already exists * - * @param shards set of shards given for allocation - * @param allocation current allocation of all the shards - * @param logger logger used for logging - * @return shard to allocation decision map + * @param shardRoutings the shards to allocate + * @param allocation the allocation state container object */ - @Override - public HashMap makeAllocationDecision( - List shards, - RoutingAllocation allocation, - Logger logger - ) { - HashMap shardAllocationDecisions = new HashMap<>(); + public void allocateUnassignedBatch(List shardRoutings, RoutingAllocation allocation) { + HashMap ineligibleShardAllocationDecisions = new HashMap<>(); List eligibleShards = new ArrayList<>(); List inEligibleShards = new ArrayList<>(); // identify ineligible shards - for (ShardRouting shard : shards) { + for (ShardRouting shard : shardRoutings) { AllocateUnassignedDecision decision = getInEligibleShardDecision(shard, allocation); if (decision != null) { + ineligibleShardAllocationDecisions.put(shard.shardId(), decision); inEligibleShards.add(shard); - shardAllocationDecisions.put(shard, decision); } else { eligibleShards.add(shard); } } - // Do not call fetchData if there are no eligible shards - if (eligibleShards.isEmpty()) { - return shardAllocationDecisions; - } + // only fetch data for eligible shards final FetchResult shardsState = fetchData(eligibleShards, inEligibleShards, allocation); - // process the received data - for (ShardRouting unassignedShard : eligibleShards) { - List nodeShardStates = adaptToNodeShardStates(unassignedShard, shardsState); - // get allocation decision for this shard - shardAllocationDecisions.put(unassignedShard, getAllocationDecision(unassignedShard, allocation, nodeShardStates, logger)); + RoutingNodes.UnassignedShards.UnassignedIterator iterator = allocation.routingNodes().unassigned().iterator(); + while (iterator.hasNext()) { + ShardRouting unassignedShard = iterator.next(); + AllocateUnassignedDecision allocationDecision; + + if (shardRoutings.contains(unassignedShard)) { + assert unassignedShard.primary(); + if (ineligibleShardAllocationDecisions.containsKey(unassignedShard.shardId())) { + allocationDecision = ineligibleShardAllocationDecisions.get(unassignedShard.shardId()); + } else { + List nodeShardStates = adaptToNodeShardStates(unassignedShard, shardsState); + allocationDecision = getAllocationDecision(unassignedShard, allocation, nodeShardStates, logger); + } + executeDecision(unassignedShard, allocationDecision, allocation, iterator); + } } - return shardAllocationDecisions; } /** diff --git a/server/src/main/java/org/opensearch/gateway/ReplicaShardBatchAllocator.java b/server/src/main/java/org/opensearch/gateway/ReplicaShardBatchAllocator.java index be7867b7823f6..f2cb3d053440d 100644 --- a/server/src/main/java/org/opensearch/gateway/ReplicaShardBatchAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/ReplicaShardBatchAllocator.java @@ -10,6 +10,7 @@ import org.apache.logging.log4j.Logger; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.routing.RoutingNodes; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.UnassignedInfo; import org.opensearch.cluster.routing.allocation.AllocateUnassignedDecision; @@ -29,6 +30,8 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.function.Supplier; +import java.util.stream.Collectors; /** * Allocates replica 
shards in a batch mode @@ -42,7 +45,7 @@ public abstract class ReplicaShardBatchAllocator extends ReplicaShardAllocator { * match. Today, a better match is one that can perform a no-op recovery while the previous recovery * has to copy segment files. * - * @param allocation the overall routing allocation + * @param allocation the overall routing allocation * @param shardBatches a list of shard batches to check for existing recoveries */ public void processExistingRecoveries(RoutingAllocation allocation, List> shardBatches) { @@ -98,71 +101,92 @@ protected FetchResult> fetchDataResultSupplier = () -> { + return convertToNodeStoreFilesMetadataMap( + unassignedShard, + fetchData(List.of(unassignedShard), Collections.emptyList(), allocation) + ); + }; + return getUnassignedShardAllocationDecision(unassignedShard, allocation, fetchDataResultSupplier); } - @Override - public HashMap makeAllocationDecision( - List shards, - RoutingAllocation allocation, - Logger logger - ) { - HashMap shardAllocationDecisions = new HashMap<>(); - final boolean explain = allocation.debugDecision(); + /** + * Allocate Batch of unassigned shard to nodes where valid copies of the shard already exists + * + * @param shardRoutings the shards to allocate + * @param allocation the allocation state container object + */ + public void allocateUnassignedBatch(List shardRoutings, RoutingAllocation allocation) { List eligibleShards = new ArrayList<>(); List ineligibleShards = new ArrayList<>(); - HashMap>> nodeAllocationDecisions = new HashMap<>(); - for (ShardRouting shard : shards) { - if (!isResponsibleFor(shard)) { - // this allocator n is not responsible for allocating this shard + Map ineligibleShardAllocationDecisions = new HashMap<>(); + + for (ShardRouting shard : shardRoutings) { + AllocateUnassignedDecision shardDecisionWithoutFetch = getUnassignedShardAllocationDecision(shard, allocation, null); + // Without fetchData, decision for in-eligible shards is non-null from our preliminary checks and null for eligible shards. + if (shardDecisionWithoutFetch != null) { ineligibleShards.add(shard); - shardAllocationDecisions.put(shard, AllocateUnassignedDecision.NOT_TAKEN); - continue; + ineligibleShardAllocationDecisions.put(shard, shardDecisionWithoutFetch); + } else { + eligibleShards.add(shard); } + } - Tuple> result = canBeAllocatedToAtLeastOneNode(shard, allocation); - Decision allocationDecision = result.v1(); - if (allocationDecision.type() != Decision.Type.YES && (!explain || !hasInitiatedFetching(shard))) { - // only return early if we are not in explain mode, or we are in explain mode but we have not - // yet attempted to fetch any shard data - logger.trace("{}: ignoring allocation, can't be allocated on any node", shard); - shardAllocationDecisions.put( - shard, - AllocateUnassignedDecision.no( - UnassignedInfo.AllocationStatus.fromDecision(allocationDecision.type()), - result.v2() != null ? 
new ArrayList<>(result.v2().values()) : null - ) - ); - continue; - } - // storing the nodeDecisions in nodeAllocationDecisions if the decision is not YES - // so that we don't have to compute the decisions again - nodeAllocationDecisions.put(shard, result); + // only fetch data for eligible shards + final FetchResult shardsState = fetchData(eligibleShards, ineligibleShards, allocation); - eligibleShards.add(shard); + List shardIdsFromBatch = shardRoutings.stream().map(shardRouting -> shardRouting.shardId()).collect(Collectors.toList()); + RoutingNodes.UnassignedShards.UnassignedIterator iterator = allocation.routingNodes().unassigned().iterator(); + while (iterator.hasNext()) { + ShardRouting unassignedShard = iterator.next(); + // There will be only one entry for the shard in the unassigned shards batch + // for a shard with multiple unassigned replicas, hence we are comparing the shard ids + // instead of ShardRouting in-order to evaluate shard assignment for all unassigned replicas of a shard. + if (!unassignedShard.primary() && shardIdsFromBatch.contains(unassignedShard.shardId())) { + AllocateUnassignedDecision allocateUnassignedDecision; + if (ineligibleShardAllocationDecisions.containsKey(unassignedShard)) { + allocateUnassignedDecision = ineligibleShardAllocationDecisions.get(unassignedShard); + } else { + // The shard's eligibility is being recomputed again as + // the routing allocation state is updated during shard allocation decision execution + // because of which allocation eligibility of other unassigned shards can change. + allocateUnassignedDecision = getUnassignedShardAllocationDecision( + unassignedShard, + allocation, + () -> convertToNodeStoreFilesMetadataMap(unassignedShard, shardsState) + ); + } + executeDecision(unassignedShard, allocateUnassignedDecision, allocation, iterator); + } } + } - // Do not call fetchData if there are no eligible shards - if (eligibleShards.isEmpty()) { - return shardAllocationDecisions; + private AllocateUnassignedDecision getUnassignedShardAllocationDecision( + ShardRouting shardRouting, + RoutingAllocation allocation, + Supplier> nodeStoreFileMetaDataMapSupplier + ) { + if (!isResponsibleFor(shardRouting)) { + return AllocateUnassignedDecision.NOT_TAKEN; } - // only fetch data for eligible shards - final FetchResult shardsState = fetchData(eligibleShards, ineligibleShards, allocation); + Tuple> result = canBeAllocatedToAtLeastOneNode(shardRouting, allocation); - for (ShardRouting unassignedShard : eligibleShards) { - Tuple> result = nodeAllocationDecisions.get(unassignedShard); - shardAllocationDecisions.put( - unassignedShard, - getAllocationDecision( - unassignedShard, - allocation, - convertToNodeStoreFilesMetadataMap(unassignedShard, shardsState), - result, - logger - ) + final boolean explain = allocation.debugDecision(); + Decision allocationDecision = result.v1(); + if (allocationDecision.type() != Decision.Type.YES && (!explain || !hasInitiatedFetching(shardRouting))) { + // only return early if we are not in explain mode, or we are in explain mode but we have not + // yet attempted to fetch any shard data + logger.trace("{}: ignoring allocation, can't be allocated on any node", shardRouting); + return AllocateUnassignedDecision.no( + UnassignedInfo.AllocationStatus.fromDecision(allocationDecision.type()), + result.v2() != null ? 
new ArrayList<>(result.v2().values()) : null ); } - return shardAllocationDecisions; + if (nodeStoreFileMetaDataMapSupplier != null) { + Map discoveryNodeStoreFilesMetadataMap = nodeStoreFileMetaDataMapSupplier.get(); + return getAllocationDecision(shardRouting, allocation, discoveryNodeStoreFilesMetadataMap, result, logger); + } + return null; } private Map convertToNodeStoreFilesMetadataMap( diff --git a/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java b/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java index 1fca6d959fbbf..2786cd432b002 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java +++ b/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java @@ -516,7 +516,7 @@ public ClusterMetadataManifest(StreamInput in) throws IOException { this.indices = Collections.unmodifiableList(in.readList(UploadedIndexMetadata::new)); this.previousClusterUUID = in.readString(); this.clusterUUIDCommitted = in.readBoolean(); - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_15_0)) { this.codecVersion = in.readInt(); this.uploadedCoordinationMetadata = new UploadedMetadataAttribute(in); this.uploadedSettingsMetadata = new UploadedMetadataAttribute(in); @@ -690,7 +690,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeCollection(indices); out.writeString(previousClusterUUID); out.writeBoolean(clusterUUIDCommitted); - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_15_0)) { out.writeInt(codecVersion); uploadedCoordinationMetadata.writeTo(out); uploadedSettingsMetadata.writeTo(out); @@ -1158,8 +1158,7 @@ public String getComponent() { } public String getUploadedFilename() { - String[] splitPath = uploadedFilename.split("/"); - return splitPath[splitPath.length - 1]; + return uploadedFilename; } public String getIndexName() { diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateAttributesManager.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateAttributesManager.java index 7e83a7bf7da44..8f986423587d7 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateAttributesManager.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateAttributesManager.java @@ -10,27 +10,26 @@ import org.opensearch.action.LatchedActionListener; import org.opensearch.cluster.ClusterState; -import org.opensearch.cluster.ClusterState.Custom; -import org.opensearch.cluster.block.ClusterBlocks; -import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.DiffableUtils; +import org.opensearch.cluster.DiffableUtils.NonDiffableValueSerializer; import org.opensearch.common.CheckedRunnable; import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.RemoteWritableEntityStore; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; -import org.opensearch.core.compress.Compressor; -import org.opensearch.core.xcontent.NamedXContentRegistry; -import org.opensearch.core.xcontent.ToXContent; import org.opensearch.gateway.remote.model.RemoteClusterBlocks; import org.opensearch.gateway.remote.model.RemoteClusterStateBlobStore; import org.opensearch.gateway.remote.model.RemoteClusterStateCustoms; import org.opensearch.gateway.remote.model.RemoteDiscoveryNodes; import 
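
// Illustrative aside: the V_3_0_0 -> V_2_15_0 gate change above follows the standard
// OpenSearch wire-compatibility pattern. A minimal sketch, assuming a hypothetical
// Writeable with a single gated field; the class and field names are illustrative only.
import org.opensearch.Version;
import org.opensearch.core.common.io.stream.StreamInput;
import org.opensearch.core.common.io.stream.StreamOutput;

import java.io.IOException;

final class VersionGatedField {
    private final int codecVersion;

    VersionGatedField(StreamInput in) throws IOException {
        // Peers older than 2.15.0 never wrote this field, so read it only when the
        // sender's version says it is present; otherwise fall back to a default.
        this.codecVersion = in.getVersion().onOrAfter(Version.V_2_15_0) ? in.readInt() : 0;
    }

    void writeTo(StreamOutput out) throws IOException {
        // The write side must mirror the read side exactly, or the stream desyncs.
        if (out.getVersion().onOrAfter(Version.V_2_15_0)) {
            out.writeInt(codecVersion);
        }
    }
}
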
org.opensearch.gateway.remote.model.RemoteReadResult; +import org.opensearch.index.translog.transfer.BlobStoreTransferService; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.threadpool.ThreadPool; import java.io.IOException; +import java.util.Collections; import java.util.HashMap; -import java.util.HashSet; import java.util.Map; -import java.util.Set; /** * A Manager which provides APIs to upload and download attributes of ClusterState to the {@link RemoteClusterStateBlobStore} @@ -42,27 +41,48 @@ public class RemoteClusterStateAttributesManager { public static final String DISCOVERY_NODES = "nodes"; public static final String CLUSTER_BLOCKS = "blocks"; public static final int CLUSTER_STATE_ATTRIBUTES_CURRENT_CODEC_VERSION = 1; - private final RemoteClusterStateBlobStore clusterBlocksBlobStore; - private final RemoteClusterStateBlobStore discoveryNodesBlobStore; - private final RemoteClusterStateBlobStore customsBlobStore; - private final Compressor compressor; - private final NamedXContentRegistry namedXContentRegistry; + private final Map remoteWritableEntityStores; private final NamedWriteableRegistry namedWriteableRegistry; RemoteClusterStateAttributesManager( - RemoteClusterStateBlobStore clusterBlocksBlobStore, - RemoteClusterStateBlobStore discoveryNodesBlobStore, - RemoteClusterStateBlobStore customsBlobStore, - Compressor compressor, - NamedXContentRegistry namedXContentRegistry, - NamedWriteableRegistry namedWriteableRegistry + String clusterName, + BlobStoreRepository blobStoreRepository, + BlobStoreTransferService blobStoreTransferService, + NamedWriteableRegistry namedWriteableRegistry, + ThreadPool threadpool ) { - this.clusterBlocksBlobStore = clusterBlocksBlobStore; - this.discoveryNodesBlobStore = discoveryNodesBlobStore; - this.customsBlobStore = customsBlobStore; - this.compressor = compressor; - this.namedXContentRegistry = namedXContentRegistry; this.namedWriteableRegistry = namedWriteableRegistry; + this.remoteWritableEntityStores = new HashMap<>(); + this.remoteWritableEntityStores.put( + RemoteDiscoveryNodes.DISCOVERY_NODES, + new RemoteClusterStateBlobStore<>( + blobStoreTransferService, + blobStoreRepository, + clusterName, + threadpool, + ThreadPool.Names.REMOTE_STATE_READ + ) + ); + this.remoteWritableEntityStores.put( + RemoteClusterBlocks.CLUSTER_BLOCKS, + new RemoteClusterStateBlobStore<>( + blobStoreTransferService, + blobStoreRepository, + clusterName, + threadpool, + ThreadPool.Names.REMOTE_STATE_READ + ) + ); + this.remoteWritableEntityStores.put( + RemoteClusterStateCustoms.CLUSTER_STATE_CUSTOM, + new RemoteClusterStateBlobStore<>( + blobStoreTransferService, + blobStoreRepository, + clusterName, + threadpool, + ThreadPool.Names.REMOTE_STATE_READ + ) + ); } /** @@ -71,10 +91,9 @@ public class RemoteClusterStateAttributesManager { CheckedRunnable getAsyncMetadataWriteAction( String component, AbstractRemoteWritableBlobEntity blobEntity, - RemoteClusterStateBlobStore remoteEntityStore, LatchedActionListener latchedActionListener ) { - return () -> remoteEntityStore.writeAsync(blobEntity, getActionListener(component, blobEntity, latchedActionListener)); + return () -> getStore(blobEntity).writeAsync(blobEntity, getActionListener(component, blobEntity, latchedActionListener)); } private ActionListener getActionListener( @@ -88,31 +107,55 @@ private ActionListener getActionListener( ); } + private RemoteWritableEntityStore getStore(AbstractRemoteWritableBlobEntity entity) { + RemoteWritableEntityStore remoteStore = 
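
// A minimal sketch of the per-entity-type store registry introduced above, under the
// assumption that each entity type maps to exactly one store; Store is a simplified
// stand-in for RemoteWritableEntityStore.
import java.util.HashMap;
import java.util.Map;

final class EntityStoreRegistry<Store> {
    private final Map<String, Store> storesByEntityType = new HashMap<>();

    void register(String entityType, Store store) {
        storesByEntityType.put(entityType, store); // one store per type, last registration wins
    }

    Store resolve(String entityType) {
        Store store = storesByEntityType.get(entityType);
        if (store == null) {
            // Mirrors getStore(): asking for an unregistered type is a caller bug.
            throw new IllegalArgumentException("Unknown entity type [" + entityType + "]");
        }
        return store;
    }
}
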
remoteWritableEntityStores.get(entity.getType()); + if (remoteStore == null) { + throw new IllegalArgumentException("Unknown entity type [" + entity.getType() + "]"); + } + return remoteStore; + } + public CheckedRunnable getAsyncMetadataReadAction( String component, AbstractRemoteWritableBlobEntity blobEntity, - RemoteClusterStateBlobStore remoteEntityStore, LatchedActionListener listener ) { final ActionListener actionListener = ActionListener.wrap( - response -> listener.onResponse(new RemoteReadResult((ToXContent) response, CLUSTER_STATE_ATTRIBUTE, component)), + response -> listener.onResponse(new RemoteReadResult(response, CLUSTER_STATE_ATTRIBUTE, component)), listener::onFailure ); - return () -> remoteEntityStore.readAsync(blobEntity, actionListener); + return () -> getStore(blobEntity).readAsync(blobEntity, actionListener); } - public Map<String, Custom> getUpdatedCustoms(ClusterState clusterState, ClusterState previousClusterState) { - Map<String, Custom> updatedCustoms = new HashMap<>(); - Set<String> currentCustoms = new HashSet<>(clusterState.customs().keySet()); - for (Map.Entry<String, Custom> entry : previousClusterState.customs().entrySet()) { - if (currentCustoms.contains(entry.getKey()) && !entry.getValue().equals(clusterState.customs().get(entry.getKey()))) { - updatedCustoms.put(entry.getKey(), clusterState.customs().get(entry.getKey())); - } - currentCustoms.remove(entry.getKey()); + public DiffableUtils.MapDiff<String, ClusterState.Custom, Map<String, ClusterState.Custom>> getUpdatedCustoms( + ClusterState clusterState, + ClusterState previousClusterState, + boolean isRemotePublicationEnabled, + boolean isFirstUpload + ) { + if (!isRemotePublicationEnabled) { + // When isRemotePublicationEnabled is false, we do not want to store any custom objects + return DiffableUtils.diff( + Collections.emptyMap(), + Collections.emptyMap(), + DiffableUtils.getStringKeySerializer(), + NonDiffableValueSerializer.getAbstractInstance() + ); } - for (String custom : currentCustoms) { - updatedCustoms.put(custom, clusterState.customs().get(custom)); + if (isFirstUpload) { + // For the first upload of ephemeral metadata, we want to upload all customs + return DiffableUtils.diff( + Collections.emptyMap(), + clusterState.customs(), + DiffableUtils.getStringKeySerializer(), + NonDiffableValueSerializer.getAbstractInstance() + ); } - return updatedCustoms; + return DiffableUtils.diff( + previousClusterState.customs(), + clusterState.customs(), + DiffableUtils.getStringKeySerializer(), + NonDiffableValueSerializer.getAbstractInstance() + ); } } diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManager.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManager.java index 2fca239b10efd..99235bc96bfe3 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManager.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManager.java @@ -13,6 +13,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Strings; import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.routing.remote.RemoteRoutingTableService; import org.opensearch.cluster.service.ClusterApplierService; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.blobstore.BlobMetadata; @@ -34,12 +35,9 @@ import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; -import static org.opensearch.gateway.remote.RemoteClusterStateService.GLOBAL_METADATA_FORMAT; -import static
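
// A hand-rolled sketch of what the DiffableUtils.diff(...) calls above compute:
// upserts (entries added or changed since the previous state) and deletes (keys that
// disappeared). Purely illustrative; the real MapDiff also carries key/value serializers.
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;

final class MapDiffSketch {
    static <K, V> Map<K, V> upserts(Map<K, V> previous, Map<K, V> current) {
        Map<K, V> upserts = new HashMap<>();
        current.forEach((key, value) -> {
            if (!Objects.equals(previous.get(key), value)) {
                upserts.put(key, value); // new key, or same key with a changed value
            }
        });
        return upserts;
    }

    static <K, V> List<K> deletes(Map<K, V> previous, Map<K, V> current) {
        List<K> deletes = new ArrayList<>();
        for (K key : previous.keySet()) {
            if (!current.containsKey(key)) {
                deletes.add(key); // present in the previous state, gone in the current one
            }
        }
        return deletes;
    }
}
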
org.opensearch.gateway.remote.RemoteClusterStateService.GLOBAL_METADATA_PATH_TOKEN; -import static org.opensearch.gateway.remote.RemoteClusterStateService.INDEX_METADATA_FORMAT; -import static org.opensearch.gateway.remote.RemoteClusterStateService.INDEX_PATH_TOKEN; -import static org.opensearch.gateway.remote.RemoteClusterStateService.MANIFEST_FILE_PREFIX; -import static org.opensearch.gateway.remote.RemoteClusterStateService.MANIFEST_PATH_TOKEN; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.GLOBAL_METADATA_PATH_TOKEN; +import static org.opensearch.gateway.remote.model.RemoteClusterMetadataManifest.MANIFEST; +import static org.opensearch.gateway.remote.model.RemoteGlobalMetadata.GLOBAL_METADATA_FORMAT; /** * A Manager which provides APIs to clean up stale cluster state files and runs an async stale cleanup task @@ -74,8 +72,14 @@ public class RemoteClusterStateCleanupManager implements Closeable { private long lastCleanupAttemptStateVersion; private final ThreadPool threadpool; private final ClusterApplierService clusterApplierService; + private RemoteManifestManager remoteManifestManager; + private final RemoteRoutingTableService remoteRoutingTableService; - public RemoteClusterStateCleanupManager(RemoteClusterStateService remoteClusterStateService, ClusterService clusterService) { + public RemoteClusterStateCleanupManager( + RemoteClusterStateService remoteClusterStateService, + ClusterService clusterService, + RemoteRoutingTableService remoteRoutingTableService + ) { this.remoteClusterStateService = remoteClusterStateService; this.remoteStateStats = remoteClusterStateService.getStats(); ClusterSettings clusterSettings = clusterService.getClusterSettings(); @@ -85,10 +89,12 @@ public RemoteClusterStateCleanupManager(RemoteClusterStateService remoteClusterS // initialize with 0, a cleanup will be done when this node is elected master node and version is incremented more than threshold this.lastCleanupAttemptStateVersion = 0; clusterSettings.addSettingsUpdateConsumer(REMOTE_CLUSTER_STATE_CLEANUP_INTERVAL_SETTING, this::updateCleanupInterval); + this.remoteRoutingTableService = remoteRoutingTableService; } void start() { staleFileDeletionTask = new AsyncStaleFileDeletion(this); + remoteManifestManager = remoteClusterStateService.getRemoteManifestManager(); } @Override @@ -171,14 +177,20 @@ void deleteClusterMetadata( Set staleManifestPaths = new HashSet<>(); Set staleIndexMetadataPaths = new HashSet<>(); Set staleGlobalMetadataPaths = new HashSet<>(); + Set staleEphemeralAttributePaths = new HashSet<>(); + Set staleIndexRoutingPaths = new HashSet<>(); activeManifestBlobMetadata.forEach(blobMetadata -> { - ClusterMetadataManifest clusterMetadataManifest = remoteClusterStateService.fetchRemoteClusterMetadataManifest( + ClusterMetadataManifest clusterMetadataManifest = remoteManifestManager.fetchRemoteClusterMetadataManifest( clusterName, clusterUUID, blobMetadata.name() ); clusterMetadataManifest.getIndices() - .forEach(uploadedIndexMetadata -> filesToKeep.add(uploadedIndexMetadata.getUploadedFilename())); + .forEach( + uploadedIndexMetadata -> filesToKeep.add( + RemoteClusterStateUtils.getFormattedIndexFileName(uploadedIndexMetadata.getUploadedFilename()) + ) + ); if (clusterMetadataManifest.getCodecVersion() == ClusterMetadataManifest.CODEC_V1) { filesToKeep.add(clusterMetadataManifest.getGlobalMetadataFileName()); } else if (clusterMetadataManifest.getCodecVersion() >= ClusterMetadataManifest.CODEC_V2) { @@ -189,47 +201,101 @@ void deleteClusterMetadata( .values() 
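
// A minimal sketch of the cleanup trigger implied by lastCleanupAttemptStateVersion
// above: skip a sweep unless the cluster state version has advanced past a threshold
// since the last attempt. The threshold parameter is an assumed stand-in.
final class CleanupTrigger {
    private final long skipCleanupStateChanges;
    private long lastCleanupAttemptStateVersion = 0; // 0 until this node is elected cluster manager

    CleanupTrigger(long skipCleanupStateChanges) {
        this.skipCleanupStateChanges = skipCleanupStateChanges;
    }

    boolean shouldRunCleanup(long currentStateVersion) {
        if (currentStateVersion - lastCleanupAttemptStateVersion < skipCleanupStateChanges) {
            return false; // not enough new state published since the last sweep
        }
        lastCleanupAttemptStateVersion = currentStateVersion;
        return true;
    }
}
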
.forEach(attribute -> filesToKeep.add(attribute.getUploadedFilename())); } + if (clusterMetadataManifest.getTransientSettingsMetadata() != null) { + filesToKeep.add(clusterMetadataManifest.getTransientSettingsMetadata().getUploadedFilename()); + } + if (clusterMetadataManifest.getHashesOfConsistentSettings() != null) { + filesToKeep.add(clusterMetadataManifest.getHashesOfConsistentSettings().getUploadedFilename()); + } + if (clusterMetadataManifest.getDiscoveryNodesMetadata() != null) { + filesToKeep.add(clusterMetadataManifest.getDiscoveryNodesMetadata().getUploadedFilename()); + } + if (clusterMetadataManifest.getClusterBlocksMetadata() != null) { + filesToKeep.add(clusterMetadataManifest.getClusterBlocksMetadata().getUploadedFilename()); + } + if (clusterMetadataManifest.getClusterStateCustomMap() != null) { + clusterMetadataManifest.getClusterStateCustomMap() + .values() + .forEach(attribute -> filesToKeep.add(attribute.getUploadedFilename())); + } + if (clusterMetadataManifest.getIndicesRouting() != null) { + clusterMetadataManifest.getIndicesRouting() + .forEach(uploadedIndicesRouting -> filesToKeep.add(uploadedIndicesRouting.getUploadedFilename())); + } }); staleManifestBlobMetadata.forEach(blobMetadata -> { - ClusterMetadataManifest clusterMetadataManifest = remoteClusterStateService.fetchRemoteClusterMetadataManifest( + ClusterMetadataManifest clusterMetadataManifest = remoteManifestManager.fetchRemoteClusterMetadataManifest( clusterName, clusterUUID, blobMetadata.name() ); - staleManifestPaths.add(new BlobPath().add(MANIFEST_PATH_TOKEN).buildAsString() + blobMetadata.name()); + staleManifestPaths.add( + remoteManifestManager.getManifestFolderPath(clusterName, clusterUUID).buildAsString() + blobMetadata.name() + ); if (clusterMetadataManifest.getCodecVersion() == ClusterMetadataManifest.CODEC_V1) { addStaleGlobalMetadataPath(clusterMetadataManifest.getGlobalMetadataFileName(), filesToKeep, staleGlobalMetadataPaths); } else if (clusterMetadataManifest.getCodecVersion() >= ClusterMetadataManifest.CODEC_V2) { - addStaleGlobalMetadataPath( - clusterMetadataManifest.getCoordinationMetadata().getUploadedFilename(), - filesToKeep, - staleGlobalMetadataPaths - ); - addStaleGlobalMetadataPath( - clusterMetadataManifest.getSettingsMetadata().getUploadedFilename(), - filesToKeep, - staleGlobalMetadataPaths - ); - addStaleGlobalMetadataPath( - clusterMetadataManifest.getTemplatesMetadata().getUploadedFilename(), - filesToKeep, - staleGlobalMetadataPaths - ); + if (filesToKeep.contains(clusterMetadataManifest.getCoordinationMetadata().getUploadedFilename()) == false) { + staleGlobalMetadataPaths.add(clusterMetadataManifest.getCoordinationMetadata().getUploadedFilename()); + } + if (filesToKeep.contains(clusterMetadataManifest.getSettingsMetadata().getUploadedFilename()) == false) { + staleGlobalMetadataPaths.add(clusterMetadataManifest.getSettingsMetadata().getUploadedFilename()); + } + if (filesToKeep.contains(clusterMetadataManifest.getTemplatesMetadata().getUploadedFilename()) == false) { + staleGlobalMetadataPaths.add(clusterMetadataManifest.getTemplatesMetadata().getUploadedFilename()); + } clusterMetadataManifest.getCustomMetadataMap() .values() - .forEach( - attribute -> addStaleGlobalMetadataPath(attribute.getUploadedFilename(), filesToKeep, staleGlobalMetadataPaths) - ); + .stream() + .map(ClusterMetadataManifest.UploadedMetadataAttribute::getUploadedFilename) + .filter(file -> filesToKeep.contains(file) == false) + .forEach(staleGlobalMetadataPaths::add); + } + if 
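
// A condensed sketch of the retention rule implemented above: every file referenced by
// an active manifest goes into filesToKeep, and a file referenced only by stale
// manifests becomes a deletion candidate. Manifests are modeled as plain file sets.
import java.util.HashSet;
import java.util.List;
import java.util.Set;

final class ManifestRetentionSketch {
    static Set<String> staleFiles(List<Set<String>> activeManifests, List<Set<String>> staleManifests) {
        Set<String> filesToKeep = new HashSet<>();
        activeManifests.forEach(filesToKeep::addAll); // still referenced, must survive cleanup
        Set<String> stale = new HashSet<>();
        for (Set<String> manifestFiles : staleManifests) {
            for (String file : manifestFiles) {
                if (!filesToKeep.contains(file)) {
                    stale.add(file); // only stale manifests point here, safe to delete
                }
            }
        }
        return stale;
    }
}
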
(clusterMetadataManifest.getIndicesRouting() != null) { + clusterMetadataManifest.getIndicesRouting().forEach(uploadedIndicesRouting -> { + if (!filesToKeep.contains(uploadedIndicesRouting.getUploadedFilename())) { + staleIndexRoutingPaths.add(uploadedIndicesRouting.getUploadedFilename()); + logger.debug( + () -> new ParameterizedMessage( + "Indices routing paths in stale manifest: {}", + uploadedIndicesRouting.getUploadedFilename() + ) + ); + } + }); } clusterMetadataManifest.getIndices().forEach(uploadedIndexMetadata -> { - if (filesToKeep.contains(uploadedIndexMetadata.getUploadedFilename()) == false) { - staleIndexMetadataPaths.add( - new BlobPath().add(INDEX_PATH_TOKEN).add(uploadedIndexMetadata.getIndexUUID()).buildAsString() - + INDEX_METADATA_FORMAT.blobName(uploadedIndexMetadata.getUploadedFilename()) - ); + String fileName = RemoteClusterStateUtils.getFormattedIndexFileName(uploadedIndexMetadata.getUploadedFilename()); + if (filesToKeep.contains(fileName) == false) { + staleIndexMetadataPaths.add(fileName); } }); + + if (clusterMetadataManifest.getClusterBlocksMetadata() != null + && !filesToKeep.contains(clusterMetadataManifest.getClusterBlocksMetadata().getUploadedFilename())) { + staleEphemeralAttributePaths.add(clusterMetadataManifest.getClusterBlocksMetadata().getUploadedFilename()); + } + if (clusterMetadataManifest.getDiscoveryNodesMetadata() != null + && !filesToKeep.contains(clusterMetadataManifest.getDiscoveryNodesMetadata().getUploadedFilename())) { + staleEphemeralAttributePaths.add(clusterMetadataManifest.getDiscoveryNodesMetadata().getUploadedFilename()); + } + if (clusterMetadataManifest.getTransientSettingsMetadata() != null + && !filesToKeep.contains(clusterMetadataManifest.getTransientSettingsMetadata().getUploadedFilename())) { + staleEphemeralAttributePaths.add(clusterMetadataManifest.getTransientSettingsMetadata().getUploadedFilename()); + } + if (clusterMetadataManifest.getHashesOfConsistentSettings() != null + && !filesToKeep.contains(clusterMetadataManifest.getHashesOfConsistentSettings().getUploadedFilename())) { + staleEphemeralAttributePaths.add(clusterMetadataManifest.getHashesOfConsistentSettings().getUploadedFilename()); + } + if (clusterMetadataManifest.getClusterStateCustomMap() != null) { + clusterMetadataManifest.getClusterStateCustomMap() + .values() + .stream() + .filter(u -> !filesToKeep.contains(u.getUploadedFilename())) + .forEach(attribute -> staleEphemeralAttributePaths.add(attribute.getUploadedFilename())); + } + }); if (staleManifestPaths.isEmpty()) { @@ -237,9 +303,19 @@ void deleteClusterMetadata( return; } - deleteStalePaths(clusterName, clusterUUID, new ArrayList<>(staleGlobalMetadataPaths)); - deleteStalePaths(clusterName, clusterUUID, new ArrayList<>(staleIndexMetadataPaths)); - deleteStalePaths(clusterName, clusterUUID, new ArrayList<>(staleManifestPaths)); + deleteStalePaths(new ArrayList<>(staleGlobalMetadataPaths)); + deleteStalePaths(new ArrayList<>(staleIndexMetadataPaths)); + deleteStalePaths(new ArrayList<>(staleEphemeralAttributePaths)); + deleteStalePaths(new ArrayList<>(staleManifestPaths)); + try { + remoteRoutingTableService.deleteStaleIndexRoutingPaths(new ArrayList<>(staleIndexRoutingPaths)); + } catch (IOException e) { + logger.error( + () -> new ParameterizedMessage("Error while deleting stale index routing files {}", staleIndexRoutingPaths), + e + ); + remoteStateStats.indexRoutingFilesCleanupAttemptFailed(); + } } catch (IllegalStateException e) { logger.error("Error while fetching Remote Cluster Metadata manifests",
e); } catch (IOException e) { @@ -267,8 +343,8 @@ void deleteStaleClusterMetadata(String clusterName, String clusterUUID, int mani try { getBlobStoreTransferService().listAllInSortedOrderAsync( ThreadPool.Names.REMOTE_PURGE, - remoteClusterStateService.getManifestFolderPath(clusterName, clusterUUID), - MANIFEST_FILE_PREFIX, + remoteManifestManager.getManifestFolderPath(clusterName, clusterUUID), + MANIFEST, Integer.MAX_VALUE, new ActionListener<>() { @Override @@ -312,7 +388,11 @@ void deleteStaleUUIDsClusterMetadata(String clusterName, List clusterUUI clusterUUIDs.forEach( clusterUUID -> getBlobStoreTransferService().deleteAsync( ThreadPool.Names.REMOTE_PURGE, - remoteClusterStateService.getCusterMetadataBasePath(clusterName, clusterUUID), + RemoteClusterStateUtils.getClusterMetadataBasePath( + remoteClusterStateService.getBlobStoreRepository(), + clusterName, + clusterUUID + ), new ActionListener<>() { @Override public void onResponse(Void unused) { @@ -336,12 +416,9 @@ public void onFailure(Exception e) { } // package private for testing - void deleteStalePaths(String clusterName, String clusterUUID, List stalePaths) throws IOException { + void deleteStalePaths(List stalePaths) throws IOException { logger.debug(String.format(Locale.ROOT, "Deleting stale files from remote - %s", stalePaths)); - getBlobStoreTransferService().deleteBlobs( - remoteClusterStateService.getCusterMetadataBasePath(clusterName, clusterUUID), - stalePaths - ); + getBlobStoreTransferService().deleteBlobs(BlobPath.cleanPath(), stalePaths); } /** diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java index f3c90239c7adb..4a1c9c8615e39 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java @@ -11,15 +11,21 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.Version; import org.opensearch.action.LatchedActionListener; +import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.DiffableUtils; +import org.opensearch.cluster.block.ClusterBlocks; import org.opensearch.cluster.coordination.CoordinationMetadata; +import org.opensearch.cluster.metadata.DiffableStringMap; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.metadata.Metadata.XContentContext; import org.opensearch.cluster.metadata.TemplatesMetadata; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.node.DiscoveryNodes.Builder; import org.opensearch.cluster.routing.IndexRoutingTable; +import org.opensearch.cluster.routing.RoutingTable; import org.opensearch.cluster.routing.remote.InternalRemoteRoutingTableService; import org.opensearch.cluster.routing.remote.RemoteRoutingTableService; import org.opensearch.cluster.routing.remote.RemoteRoutingTableServiceFactory; @@ -27,44 +33,52 @@ import org.opensearch.common.CheckedRunnable; import org.opensearch.common.Nullable; import org.opensearch.common.blobstore.BlobContainer; -import org.opensearch.common.blobstore.BlobMetadata; -import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.BlobStore; import org.opensearch.common.settings.ClusterSettings; 
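
// A small sketch of the path-handling change in deleteStalePaths above: stale entries
// are now collected as full blob paths, so deletion passes an empty base path (the role
// BlobPath.cleanPath() plays) instead of re-prefixing each entry with the cluster
// metadata base path. The deleter interface is a simplified stand-in for
// BlobStoreTransferService.
import java.util.List;

final class StalePathDeletionSketch {
    interface BlobDeleter {
        void deleteBlobs(String basePath, List<String> paths);
    }

    static void deleteFullPaths(BlobDeleter deleter, List<String> fullPaths) {
        // Empty prefix: the entries are already complete paths from the manifest.
        deleter.deleteBlobs("", fullPaths);
    }
}
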
import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.index.Index; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedIndexMetadata; import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedMetadataAttribute; +import org.opensearch.gateway.remote.model.RemoteClusterBlocks; +import org.opensearch.gateway.remote.model.RemoteClusterStateCustoms; import org.opensearch.gateway.remote.model.RemoteClusterStateManifestInfo; -import org.opensearch.index.remote.RemoteStoreUtils; +import org.opensearch.gateway.remote.model.RemoteCoordinationMetadata; +import org.opensearch.gateway.remote.model.RemoteCustomMetadata; +import org.opensearch.gateway.remote.model.RemoteDiscoveryNodes; +import org.opensearch.gateway.remote.model.RemoteHashesOfConsistentSettings; +import org.opensearch.gateway.remote.model.RemoteIndexMetadata; +import org.opensearch.gateway.remote.model.RemotePersistentSettingsMetadata; +import org.opensearch.gateway.remote.model.RemoteReadResult; +import org.opensearch.gateway.remote.model.RemoteTemplatesMetadata; +import org.opensearch.gateway.remote.model.RemoteTransientSettingsMetadata; import org.opensearch.index.translog.transfer.BlobStoreTransferService; import org.opensearch.node.Node; import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; import org.opensearch.repositories.blobstore.BlobStoreRepository; -import org.opensearch.repositories.blobstore.ChecksumBlobStoreFormat; import org.opensearch.threadpool.ThreadPool; import java.io.Closeable; import java.io.IOException; -import java.nio.charset.StandardCharsets; import java.util.ArrayList; -import java.util.Base64; import java.util.Collections; import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.Optional; import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; @@ -73,9 +87,26 @@ import java.util.function.Supplier; import java.util.stream.Collectors; -import static java.util.Objects.requireNonNull; +import static java.util.Collections.emptyList; +import static java.util.Collections.emptyMap; +import static org.opensearch.common.util.FeatureFlags.REMOTE_PUBLICATION_EXPERIMENTAL; import static org.opensearch.gateway.PersistedClusterStateService.SLOW_WRITE_LOGGING_THRESHOLD; -import static org.opensearch.gateway.remote.model.RemoteClusterMetadataManifest.MANIFEST_CURRENT_CODEC_VERSION; +import static org.opensearch.gateway.remote.ClusterMetadataManifest.CODEC_V2; +import static org.opensearch.gateway.remote.RemoteClusterStateAttributesManager.CLUSTER_BLOCKS; +import static org.opensearch.gateway.remote.RemoteClusterStateAttributesManager.CLUSTER_STATE_ATTRIBUTE; +import static org.opensearch.gateway.remote.RemoteClusterStateAttributesManager.DISCOVERY_NODES; +import static 
org.opensearch.gateway.remote.RemoteClusterStateUtils.DELIMITER; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.UploadedMetadataResults; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.clusterUUIDContainer; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.getClusterMetadataBasePath; +import static org.opensearch.gateway.remote.model.RemoteClusterStateCustoms.CLUSTER_STATE_CUSTOM; +import static org.opensearch.gateway.remote.model.RemoteCoordinationMetadata.COORDINATION_METADATA; +import static org.opensearch.gateway.remote.model.RemoteCustomMetadata.CUSTOM_DELIMITER; +import static org.opensearch.gateway.remote.model.RemoteCustomMetadata.CUSTOM_METADATA; +import static org.opensearch.gateway.remote.model.RemoteHashesOfConsistentSettings.HASHES_OF_CONSISTENT_SETTINGS; +import static org.opensearch.gateway.remote.model.RemotePersistentSettingsMetadata.SETTING_METADATA; +import static org.opensearch.gateway.remote.model.RemoteTemplatesMetadata.TEMPLATES_METADATA; +import static org.opensearch.gateway.remote.model.RemoteTransientSettingsMetadata.TRANSIENT_SETTING_METADATA; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteStoreClusterStateEnabled; /** @@ -85,99 +116,8 @@ */ public class RemoteClusterStateService implements Closeable { - public static final String METADATA_NAME_FORMAT = "%s.dat"; - - public static final String METADATA_MANIFEST_NAME_FORMAT = "%s"; - - public static final String DELIMITER = "__"; - public static final String CUSTOM_DELIMITER = "--"; - private static final Logger logger = LogManager.getLogger(RemoteClusterStateService.class); - public static final TimeValue INDEX_METADATA_UPLOAD_TIMEOUT_DEFAULT = TimeValue.timeValueMillis(20000); - - public static final TimeValue GLOBAL_METADATA_UPLOAD_TIMEOUT_DEFAULT = TimeValue.timeValueMillis(20000); - - public static final TimeValue METADATA_MANIFEST_UPLOAD_TIMEOUT_DEFAULT = TimeValue.timeValueMillis(20000); - - public static final Setting INDEX_METADATA_UPLOAD_TIMEOUT_SETTING = Setting.timeSetting( - "cluster.remote_store.state.index_metadata.upload_timeout", - INDEX_METADATA_UPLOAD_TIMEOUT_DEFAULT, - Setting.Property.Dynamic, - Setting.Property.NodeScope - ); - - public static final Setting GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING = Setting.timeSetting( - "cluster.remote_store.state.global_metadata.upload_timeout", - GLOBAL_METADATA_UPLOAD_TIMEOUT_DEFAULT, - Setting.Property.Dynamic, - Setting.Property.NodeScope - ); - - public static final Setting METADATA_MANIFEST_UPLOAD_TIMEOUT_SETTING = Setting.timeSetting( - "cluster.remote_store.state.metadata_manifest.upload_timeout", - METADATA_MANIFEST_UPLOAD_TIMEOUT_DEFAULT, - Setting.Property.Dynamic, - Setting.Property.NodeScope - ); - - public static final ChecksumBlobStoreFormat INDEX_METADATA_FORMAT = new ChecksumBlobStoreFormat<>( - "index-metadata", - METADATA_NAME_FORMAT, - IndexMetadata::fromXContent - ); - - public static final ChecksumBlobStoreFormat GLOBAL_METADATA_FORMAT = new ChecksumBlobStoreFormat<>( - "metadata", - METADATA_NAME_FORMAT, - Metadata::fromXContent - ); - - public static final ChecksumBlobStoreFormat COORDINATION_METADATA_FORMAT = new ChecksumBlobStoreFormat<>( - "coordination", - METADATA_NAME_FORMAT, - CoordinationMetadata::fromXContent - ); - - public static final ChecksumBlobStoreFormat SETTINGS_METADATA_FORMAT = new ChecksumBlobStoreFormat<>( - "settings", - METADATA_NAME_FORMAT, - Settings::fromXContent - ); - - public static final ChecksumBlobStoreFormat 
TEMPLATES_METADATA_FORMAT = new ChecksumBlobStoreFormat<>( - "templates", - METADATA_NAME_FORMAT, - TemplatesMetadata::fromXContent - ); - - public static final ChecksumBlobStoreFormat CUSTOM_METADATA_FORMAT = new ChecksumBlobStoreFormat<>( - "custom", - METADATA_NAME_FORMAT, - null // no need of reader here, as this object is only used to write/serialize the object - ); - - /** - * Manifest format compatible with older codec v0, where codec version was missing. - */ - public static final ChecksumBlobStoreFormat CLUSTER_METADATA_MANIFEST_FORMAT_V0 = - new ChecksumBlobStoreFormat<>("cluster-metadata-manifest", METADATA_MANIFEST_NAME_FORMAT, ClusterMetadataManifest::fromXContentV0); - - /** - * Manifest format compatible with older codec v1, where codec versions/global metadata was introduced. - */ - public static final ChecksumBlobStoreFormat CLUSTER_METADATA_MANIFEST_FORMAT_V1 = - new ChecksumBlobStoreFormat<>("cluster-metadata-manifest", METADATA_MANIFEST_NAME_FORMAT, ClusterMetadataManifest::fromXContentV1); - - /** - * Manifest format compatible with codec v2, where global metadata file is replaced with multiple metadata attribute files - */ - public static final ChecksumBlobStoreFormat CLUSTER_METADATA_MANIFEST_FORMAT = new ChecksumBlobStoreFormat<>( - "cluster-metadata-manifest", - METADATA_MANIFEST_NAME_FORMAT, - ClusterMetadataManifest::fromXContent - ); - /** * Used to specify if cluster state metadata should be published to remote store */ @@ -188,18 +128,16 @@ public class RemoteClusterStateService implements Closeable { Property.Final ); - public static final String CLUSTER_STATE_PATH_TOKEN = "cluster-state"; - public static final String INDEX_PATH_TOKEN = "index"; - public static final String GLOBAL_METADATA_PATH_TOKEN = "global-metadata"; - public static final String MANIFEST_PATH_TOKEN = "manifest"; - public static final String MANIFEST_FILE_PREFIX = "manifest"; - public static final String METADATA_FILE_PREFIX = "metadata"; - public static final String COORDINATION_METADATA = "coordination"; - public static final String SETTING_METADATA = "settings"; - public static final String TEMPLATES_METADATA = "templates"; - public static final String CUSTOM_METADATA = "custom"; - public static final int SPLITED_MANIFEST_FILE_LENGTH = 6; // file name manifest__term__version__C/P__timestamp__codecversion + public static final TimeValue REMOTE_STATE_READ_TIMEOUT_DEFAULT = TimeValue.timeValueMillis(20000); + public static final Setting REMOTE_STATE_READ_TIMEOUT_SETTING = Setting.timeSetting( + "cluster.remote_store.state.read_timeout", + REMOTE_STATE_READ_TIMEOUT_DEFAULT, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + private TimeValue remoteStateReadTimeout; private final String nodeId; private final Supplier repositoriesService; private final Settings settings; @@ -211,17 +149,19 @@ public class RemoteClusterStateService implements Closeable { private final RemoteRoutingTableService remoteRoutingTableService; private volatile TimeValue slowWriteLoggingThreshold; - private volatile TimeValue indexMetadataUploadTimeout; - private volatile TimeValue globalMetadataUploadTimeout; - private volatile TimeValue metadataManifestUploadTimeout; - private RemoteClusterStateCleanupManager remoteClusterStateCleanupManager; private final RemotePersistenceStats remoteStateStats; + private RemoteClusterStateCleanupManager remoteClusterStateCleanupManager; + private RemoteIndexMetadataManager remoteIndexMetadataManager; + private RemoteGlobalMetadataManager remoteGlobalMetadataManager; 
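
// A minimal sketch of how the new dynamic read-timeout setting above is typically
// wired up, assuming access to the node's ClusterSettings; the holder class itself is
// hypothetical.
import org.opensearch.common.settings.ClusterSettings;
import org.opensearch.common.settings.Setting;
import org.opensearch.common.unit.TimeValue;

final class ReadTimeoutHolder {
    static final Setting<TimeValue> READ_TIMEOUT_SETTING = Setting.timeSetting(
        "cluster.remote_store.state.read_timeout",
        TimeValue.timeValueMillis(20000),
        Setting.Property.Dynamic,   // updatable at runtime via the cluster settings API
        Setting.Property.NodeScope
    );

    private volatile TimeValue readTimeout; // volatile: read on transfer threads, written by the updater

    ReadTimeoutHolder(ClusterSettings clusterSettings) {
        this.readTimeout = clusterSettings.get(READ_TIMEOUT_SETTING);
        clusterSettings.addSettingsUpdateConsumer(READ_TIMEOUT_SETTING, updated -> this.readTimeout = updated);
    }

    TimeValue readTimeout() {
        return readTimeout;
    }
}
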
+ private RemoteClusterStateAttributesManager remoteClusterStateAttributesManager; + private RemoteManifestManager remoteManifestManager; + private ClusterSettings clusterSettings; + private final NamedWriteableRegistry namedWriteableRegistry; private final String CLUSTER_STATE_UPLOAD_TIME_LOG_STRING = "writing cluster state for version [{}] took [{}ms]"; private final String METADATA_UPDATE_LOG_STRING = "wrote metadata for [{}] indices and skipped [{}] unchanged " + "indices, coordination metadata updated : [{}], settings metadata updated : [{}], templates metadata " + "updated : [{}], custom metadata updated : [{}], indices routing updated : [{}]"; - public static final int INDEX_METADATA_CURRENT_CODEC_VERSION = 1; - public static final int GLOBAL_METADATA_CURRENT_CODEC_VERSION = 2; + private final boolean isPublicationEnabled; // ToXContent Params with gateway mode. // We are using gateway context mode to persist all custom metadata. @@ -240,7 +180,8 @@ public RemoteClusterStateService( ClusterService clusterService, LongSupplier relativeTimeNanosSupplier, ThreadPool threadPool, - List indexMetadataUploadListeners + List indexMetadataUploadListeners, + NamedWriteableRegistry namedWriteableRegistry ) { assert isRemoteStoreClusterStateEnabled(settings) : "Remote cluster state is not enabled"; this.nodeId = nodeId; @@ -248,19 +189,24 @@ public RemoteClusterStateService( this.settings = settings; this.relativeTimeNanosSupplier = relativeTimeNanosSupplier; this.threadpool = threadPool; - ClusterSettings clusterSettings = clusterService.getClusterSettings(); + clusterSettings = clusterService.getClusterSettings(); this.slowWriteLoggingThreshold = clusterSettings.get(SLOW_WRITE_LOGGING_THRESHOLD); - this.indexMetadataUploadTimeout = clusterSettings.get(INDEX_METADATA_UPLOAD_TIMEOUT_SETTING); - this.globalMetadataUploadTimeout = clusterSettings.get(GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING); - this.metadataManifestUploadTimeout = clusterSettings.get(METADATA_MANIFEST_UPLOAD_TIMEOUT_SETTING); clusterSettings.addSettingsUpdateConsumer(SLOW_WRITE_LOGGING_THRESHOLD, this::setSlowWriteLoggingThreshold); - clusterSettings.addSettingsUpdateConsumer(INDEX_METADATA_UPLOAD_TIMEOUT_SETTING, this::setIndexMetadataUploadTimeout); - clusterSettings.addSettingsUpdateConsumer(GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING, this::setGlobalMetadataUploadTimeout); - clusterSettings.addSettingsUpdateConsumer(METADATA_MANIFEST_UPLOAD_TIMEOUT_SETTING, this::setMetadataManifestUploadTimeout); + this.remoteStateReadTimeout = clusterSettings.get(REMOTE_STATE_READ_TIMEOUT_SETTING); + clusterSettings.addSettingsUpdateConsumer(REMOTE_STATE_READ_TIMEOUT_SETTING, this::setRemoteStateReadTimeout); this.remoteStateStats = new RemotePersistenceStats(); - this.remoteClusterStateCleanupManager = new RemoteClusterStateCleanupManager(this, clusterService); + this.namedWriteableRegistry = namedWriteableRegistry; this.indexMetadataUploadListeners = indexMetadataUploadListeners; - this.remoteRoutingTableService = RemoteRoutingTableServiceFactory.getService(repositoriesService, settings, clusterSettings); + this.remoteRoutingTableService = RemoteRoutingTableServiceFactory.getService( + repositoriesService, + settings, + clusterSettings, + threadPool + ); + this.remoteClusterStateCleanupManager = new RemoteClusterStateCleanupManager(this, clusterService, remoteRoutingTableService); + this.isPublicationEnabled = FeatureFlags.isEnabled(REMOTE_PUBLICATION_EXPERIMENTAL) + && RemoteStoreNodeAttribute.isRemoteStoreClusterStateEnabled(settings) + && 
RemoteStoreNodeAttribute.isRemoteRoutingTableEnabled(settings); } /** @@ -280,24 +226,26 @@ public RemoteClusterStateManifestInfo writeFullMetadata(ClusterState clusterStat UploadedMetadataResults uploadedMetadataResults = writeMetadataInParallel( clusterState, new ArrayList<>(clusterState.metadata().indices().values()), - Collections.emptyMap(), - clusterState.metadata().customs(), + emptyMap(), + RemoteGlobalMetadataManager.filterCustoms(clusterState.metadata().customs(), isPublicationEnabled), true, true, true, + isPublicationEnabled, + isPublicationEnabled, + isPublicationEnabled, + isPublicationEnabled ? clusterState.customs() : Collections.emptyMap(), + isPublicationEnabled, remoteRoutingTableService.getIndicesRouting(clusterState.getRoutingTable()) ); - final RemoteClusterStateManifestInfo manifestDetails = uploadManifest( + final RemoteClusterStateManifestInfo manifestDetails = remoteManifestManager.uploadManifest( clusterState, - uploadedMetadataResults.uploadedIndexMetadata, + uploadedMetadataResults, previousClusterUUID, - uploadedMetadataResults.uploadedCoordinationMetadata, - uploadedMetadataResults.uploadedSettingsMetadata, - uploadedMetadataResults.uploadedTemplatesMetadata, - uploadedMetadataResults.uploadedCustomMetadataMap, - uploadedMetadataResults.uploadedIndicesRoutingMetadata, + new ClusterStateDiffManifest(clusterState, ClusterState.EMPTY_STATE), false ); + final long durationMillis = TimeValue.nsecToMSec(relativeTimeNanosSupplier.getAsLong() - startTimeNanos); remoteStateStats.stateSucceeded(); remoteStateStats.stateTook(durationMillis); @@ -326,7 +274,7 @@ public RemoteClusterStateManifestInfo writeFullMetadata(ClusterState clusterStat * manifest. The new manifest file is created by using the unchanged metadata from the previous manifest and the new metadata changes from the current * cluster state. * - * @return The uploaded ClusterMetadataManifest file + * @return {@link RemoteClusterStateManifestInfo} object containing uploaded manifest detail */ @Nullable public RemoteClusterStateManifestInfo writeIncrementalMetadata( @@ -334,6 +282,8 @@ public RemoteClusterStateManifestInfo writeIncrementalMetadata( ClusterState clusterState, ClusterMetadataManifest previousManifest ) throws IOException { + logger.trace("WRITING INCREMENTAL STATE"); + final long startTimeNanos = relativeTimeNanosSupplier.getAsLong(); if (clusterState.nodes().isLocalNodeElectedClusterManager() == false) { logger.error("Local node is not elected cluster manager. 
Exiting"); @@ -341,16 +291,17 @@ public RemoteClusterStateManifestInfo writeIncrementalMetadata( } assert previousClusterState.metadata().coordinationMetadata().term() == clusterState.metadata().coordinationMetadata().term(); - final Map customsToBeDeletedFromRemote = new HashMap<>(previousManifest.getCustomMetadataMap()); - final Map customsToUpload = getUpdatedCustoms(clusterState, previousClusterState); - final Map allUploadedCustomMap = new HashMap<>(previousManifest.getCustomMetadataMap()); - for (final String custom : clusterState.metadata().customs().keySet()) { - // remove all the customs which are present currently - customsToBeDeletedFromRemote.remove(custom); - } + boolean firstUploadForSplitGlobalMetadata = !previousManifest.hasMetadataAttributesFiles(); + final DiffableUtils.MapDiff> customsDiff = remoteGlobalMetadataManager + .getCustomsDiff(clusterState, previousClusterState, firstUploadForSplitGlobalMetadata, isPublicationEnabled); + final DiffableUtils.MapDiff> clusterStateCustomsDiff = + remoteClusterStateAttributesManager.getUpdatedCustoms(clusterState, previousClusterState, isPublicationEnabled, false); + final Map allUploadedCustomMap = new HashMap<>(previousManifest.getCustomMetadataMap()); + final Map allUploadedClusterStateCustomsMap = new HashMap<>( + previousManifest.getClusterStateCustomMap() + ); final Map indicesToBeDeletedFromRemote = new HashMap<>(previousClusterState.metadata().indices()); - int numIndicesUpdated = 0; int numIndicesUnchanged = 0; final Map allUploadedIndexMetadata = previousManifest.getIndices() @@ -381,31 +332,45 @@ public RemoteClusterStateManifestInfo writeIncrementalMetadata( indicesToBeDeletedFromRemote.remove(indexMetadata.getIndex().getName()); } - DiffableUtils.MapDiff> routingTableDiff = remoteRoutingTableService + final DiffableUtils.MapDiff> routingTableDiff = remoteRoutingTableService .getIndicesRoutingMapDiff(previousClusterState.getRoutingTable(), clusterState.getRoutingTable()); - List indicesRoutingToUpload = new ArrayList<>(); + final List indicesRoutingToUpload = new ArrayList<>(); routingTableDiff.getUpserts().forEach((k, v) -> indicesRoutingToUpload.add(v)); UploadedMetadataResults uploadedMetadataResults; // For migration case from codec V0 or V1 to V2, we have added null check on metadata attribute files, // If file is empty and codec is 1 then write global metadata. 
- boolean firstUploadForSplitGlobalMetadata = !previousManifest.hasMetadataAttributesFiles(); boolean updateCoordinationMetadata = firstUploadForSplitGlobalMetadata || Metadata.isCoordinationMetadataEqual(previousClusterState.metadata(), clusterState.metadata()) == false; ; boolean updateSettingsMetadata = firstUploadForSplitGlobalMetadata || Metadata.isSettingsMetadataEqual(previousClusterState.metadata(), clusterState.metadata()) == false; + boolean updateTransientSettingsMetadata = Metadata.isTransientSettingsMetadataEqual( + previousClusterState.metadata(), + clusterState.metadata() + ) == false; boolean updateTemplatesMetadata = firstUploadForSplitGlobalMetadata || Metadata.isTemplatesMetadataEqual(previousClusterState.metadata(), clusterState.metadata()) == false; + final boolean updateDiscoveryNodes = isPublicationEnabled + && clusterState.getNodes().delta(previousClusterState.getNodes()).hasChanges(); + final boolean updateClusterBlocks = isPublicationEnabled && !clusterState.blocks().equals(previousClusterState.blocks()); + final boolean updateHashesOfConsistentSettings = isPublicationEnabled + || Metadata.isHashesOfConsistentSettingsEqual(previousClusterState.metadata(), clusterState.metadata()) == false; + uploadedMetadataResults = writeMetadataInParallel( clusterState, toUpload, prevIndexMetadataByName, - firstUploadForSplitGlobalMetadata ? clusterState.metadata().customs() : customsToUpload, + customsDiff.getUpserts(), updateCoordinationMetadata, updateSettingsMetadata, updateTemplatesMetadata, + updateDiscoveryNodes, + updateClusterBlocks, + updateTransientSettingsMetadata, + clusterStateCustomsDiff.getUpserts(), + updateHashesOfConsistentSettings, indicesRoutingToUpload ); @@ -414,28 +379,48 @@ public RemoteClusterStateManifestInfo writeIncrementalMetadata( uploadedIndexMetadata -> allUploadedIndexMetadata.put(uploadedIndexMetadata.getIndexName(), uploadedIndexMetadata) ); allUploadedCustomMap.putAll(uploadedMetadataResults.uploadedCustomMetadataMap); + allUploadedClusterStateCustomsMap.putAll(uploadedMetadataResults.uploadedClusterStateCustomMetadataMap); // remove the data for removed custom/indices - customsToBeDeletedFromRemote.keySet().forEach(allUploadedCustomMap::remove); + customsDiff.getDeletes().forEach(allUploadedCustomMap::remove); indicesToBeDeletedFromRemote.keySet().forEach(allUploadedIndexMetadata::remove); + clusterStateCustomsDiff.getDeletes().forEach(allUploadedClusterStateCustomsMap::remove); - List allUploadedIndicesRouting = new ArrayList<>(); - allUploadedIndicesRouting = remoteRoutingTableService.getAllUploadedIndicesRouting( + if (!updateCoordinationMetadata) { + uploadedMetadataResults.uploadedCoordinationMetadata = previousManifest.getCoordinationMetadata(); + } + if (!updateSettingsMetadata) { + uploadedMetadataResults.uploadedSettingsMetadata = previousManifest.getSettingsMetadata(); + } + if (!updateTransientSettingsMetadata) { + uploadedMetadataResults.uploadedTransientSettingsMetadata = previousManifest.getTransientSettingsMetadata(); + } + if (!updateTemplatesMetadata) { + uploadedMetadataResults.uploadedTemplatesMetadata = previousManifest.getTemplatesMetadata(); + } + if (!updateDiscoveryNodes) { + uploadedMetadataResults.uploadedDiscoveryNodes = previousManifest.getDiscoveryNodesMetadata(); + } + if (!updateClusterBlocks) { + uploadedMetadataResults.uploadedClusterBlocks = previousManifest.getClusterBlocksMetadata(); + } + if (!updateHashesOfConsistentSettings) { + uploadedMetadataResults.uploadedHashesOfConsistentSettings = 
previousManifest.getHashesOfConsistentSettings(); + } + uploadedMetadataResults.uploadedCustomMetadataMap = allUploadedCustomMap; + uploadedMetadataResults.uploadedClusterStateCustomMetadataMap = allUploadedClusterStateCustomsMap; + uploadedMetadataResults.uploadedIndexMetadata = new ArrayList<>(allUploadedIndexMetadata.values()); + + uploadedMetadataResults.uploadedIndicesRoutingMetadata = remoteRoutingTableService.getAllUploadedIndicesRouting( previousManifest, uploadedMetadataResults.uploadedIndicesRoutingMetadata, routingTableDiff.getDeletes() ); - final RemoteClusterStateManifestInfo manifestDetails = uploadManifest( + final RemoteClusterStateManifestInfo manifestDetails = remoteManifestManager.uploadManifest( clusterState, - new ArrayList<>(allUploadedIndexMetadata.values()), + uploadedMetadataResults, previousManifest.getPreviousClusterUUID(), - updateCoordinationMetadata ? uploadedMetadataResults.uploadedCoordinationMetadata : previousManifest.getCoordinationMetadata(), - updateSettingsMetadata ? uploadedMetadataResults.uploadedSettingsMetadata : previousManifest.getSettingsMetadata(), - updateTemplatesMetadata ? uploadedMetadataResults.uploadedTemplatesMetadata : previousManifest.getTemplatesMetadata(), - firstUploadForSplitGlobalMetadata || !customsToUpload.isEmpty() - ? allUploadedCustomMap - : previousManifest.getCustomMetadataMap(), - allUploadedIndicesRouting, + new ClusterStateDiffManifest(clusterState, previousClusterState), false ); @@ -454,18 +439,39 @@ public RemoteClusterStateManifestInfo writeIncrementalMetadata( updateCoordinationMetadata, updateSettingsMetadata, updateTemplatesMetadata, - customsToUpload.size(), + customsDiff.getUpserts().size(), indicesRoutingToUpload.size() ); if (durationMillis >= slowWriteLoggingThreshold.getMillis()) { + // TODO update logs to add more details about objects uploaded logger.warn( - "{} which is above the warn threshold of [{}]; {}", - clusterStateUploadTimeMessage, + "writing cluster state took [{}ms] which is above the warn threshold of [{}]; " + + "wrote metadata for [{}] indices and skipped [{}] unchanged indices, coordination metadata updated : [{}], " + + "settings metadata updated : [{}], templates metadata updated : [{}], custom metadata updated : [{}]", + durationMillis, slowWriteLoggingThreshold, - metadataUpdateMessage + numIndicesUpdated, + numIndicesUnchanged, + updateCoordinationMetadata, + updateSettingsMetadata, + updateTemplatesMetadata, + customsDiff.getUpserts().size() ); } else { logger.info("{}; {}", clusterStateUploadTimeMessage, metadataUpdateMessage); + logger.info( + "writing cluster state for version [{}] took [{}ms]; " + + "wrote metadata for [{}] indices and skipped [{}] unchanged indices, coordination metadata updated : [{}], " + + "settings metadata updated : [{}], templates metadata updated : [{}], custom metadata updated : [{}]", + manifestDetails.getClusterMetadataManifest().getStateVersion(), + durationMillis, + numIndicesUpdated, + numIndicesUnchanged, + updateCoordinationMetadata, + updateSettingsMetadata, + updateTemplatesMetadata, + customsDiff.getUpserts().size() + ); } return manifestDetails; } @@ -478,15 +484,21 @@ private UploadedMetadataResults writeMetadataInParallel( boolean uploadCoordinationMetadata, boolean uploadSettingsMetadata, boolean uploadTemplateMetadata, + boolean uploadDiscoveryNodes, + boolean uploadClusterBlock, + boolean uploadTransientSettingMetadata, + Map clusterStateCustomToUpload, + boolean uploadHashesOfConsistentSettings, List indicesRoutingToUpload ) throws 
IOException { assert Objects.nonNull(indexMetadataUploadListeners) : "indexMetadataUploadListeners can not be null"; int totalUploadTasks = indexToUpload.size() + indexMetadataUploadListeners.size() + customToUpload.size() + (uploadCoordinationMetadata ? 1 : 0) + (uploadSettingsMetadata ? 1 : 0) + (uploadTemplateMetadata ? 1 : 0) - + indicesRoutingToUpload.size(); + + (uploadDiscoveryNodes ? 1 : 0) + (uploadClusterBlock ? 1 : 0) + (uploadTransientSettingMetadata ? 1 : 0) + + clusterStateCustomToUpload.size() + (uploadHashesOfConsistentSettings ? 1 : 0) + indicesRoutingToUpload.size(); CountDownLatch latch = new CountDownLatch(totalUploadTasks); - Map> uploadTasks = new HashMap<>(totalUploadTasks); - Map results = new HashMap<>(totalUploadTasks); + Map> uploadTasks = new ConcurrentHashMap<>(totalUploadTasks); + Map results = new ConcurrentHashMap<>(totalUploadTasks); List exceptionList = Collections.synchronizedList(new ArrayList<>(totalUploadTasks)); LatchedActionListener listener = new LatchedActionListener<>( @@ -506,11 +518,29 @@ private UploadedMetadataResults writeMetadataInParallel( if (uploadSettingsMetadata) { uploadTasks.put( SETTING_METADATA, - getAsyncMetadataWriteAction( - clusterState, - SETTING_METADATA, - SETTINGS_METADATA_FORMAT, - clusterState.metadata().persistentSettings(), + remoteGlobalMetadataManager.getAsyncMetadataWriteAction( + new RemotePersistentSettingsMetadata( + clusterState.metadata().persistentSettings(), + clusterState.metadata().version(), + clusterState.metadata().clusterUUID(), + blobStoreRepository.getCompressor(), + blobStoreRepository.getNamedXContentRegistry() + ), + listener + ) + ); + } + if (uploadTransientSettingMetadata) { + uploadTasks.put( + TRANSIENT_SETTING_METADATA, + remoteGlobalMetadataManager.getAsyncMetadataWriteAction( + new RemoteTransientSettingsMetadata( + clusterState.metadata().transientSettings(), + clusterState.metadata().version(), + clusterState.metadata().clusterUUID(), + blobStoreRepository.getCompressor(), + blobStoreRepository.getNamedXContentRegistry() + ), listener ) ); @@ -518,11 +548,14 @@ private UploadedMetadataResults writeMetadataInParallel( if (uploadCoordinationMetadata) { uploadTasks.put( COORDINATION_METADATA, - getAsyncMetadataWriteAction( - clusterState, - COORDINATION_METADATA, - COORDINATION_METADATA_FORMAT, - clusterState.metadata().coordinationMetadata(), + remoteGlobalMetadataManager.getAsyncMetadataWriteAction( + new RemoteCoordinationMetadata( + clusterState.metadata().coordinationMetadata(), + clusterState.metadata().version(), + clusterState.metadata().clusterUUID(), + blobStoreRepository.getCompressor(), + blobStoreRepository.getNamedXContentRegistry() + ), listener ) ); @@ -530,11 +563,58 @@ private UploadedMetadataResults writeMetadataInParallel( if (uploadTemplateMetadata) { uploadTasks.put( TEMPLATES_METADATA, - getAsyncMetadataWriteAction( - clusterState, - TEMPLATES_METADATA, - TEMPLATES_METADATA_FORMAT, - clusterState.metadata().templatesMetadata(), + remoteGlobalMetadataManager.getAsyncMetadataWriteAction( + new RemoteTemplatesMetadata( + clusterState.metadata().templatesMetadata(), + clusterState.metadata().version(), + clusterState.metadata().clusterUUID(), + blobStoreRepository.getCompressor(), + blobStoreRepository.getNamedXContentRegistry() + ), + listener + ) + ); + } + if (uploadDiscoveryNodes) { + uploadTasks.put( + DISCOVERY_NODES, + remoteClusterStateAttributesManager.getAsyncMetadataWriteAction( + RemoteDiscoveryNodes.DISCOVERY_NODES, + new RemoteDiscoveryNodes( + 
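
// A self-contained sketch of the fan-out/fan-in pattern used by
// writeMetadataInParallel: submit independent upload tasks, count a shared latch down
// as each finishes, and bound the wait. The concurrent map mirrors the switch above
// from HashMap to ConcurrentHashMap, since listeners complete on transfer threads.
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;

final class ParallelUploadSketch {
    static Map<String, Boolean> uploadAll(ExecutorService executor, Map<String, Runnable> tasks, long timeoutMillis)
        throws InterruptedException {
        CountDownLatch latch = new CountDownLatch(tasks.size());
        Map<String, Boolean> results = new ConcurrentHashMap<>(tasks.size()); // written from many threads
        tasks.forEach((name, task) -> executor.execute(() -> {
            try {
                task.run();
                results.put(name, Boolean.TRUE);
            } catch (RuntimeException e) {
                results.put(name, Boolean.FALSE); // collect failures instead of losing them
            } finally {
                latch.countDown(); // count down on success and failure alike, or await() hangs
            }
        }));
        if (latch.await(timeoutMillis, TimeUnit.MILLISECONDS) == false) {
            throw new IllegalStateException("timed out with " + latch.getCount() + " uploads pending");
        }
        return results;
    }
}
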
clusterState.nodes(), + clusterState.version(), + clusterState.stateUUID(), + blobStoreRepository.getCompressor() + ), + listener + ) + ); + } + if (uploadClusterBlock) { + uploadTasks.put( + CLUSTER_BLOCKS, + remoteClusterStateAttributesManager.getAsyncMetadataWriteAction( + RemoteClusterBlocks.CLUSTER_BLOCKS, + new RemoteClusterBlocks( + clusterState.blocks(), + clusterState.version(), + clusterState.metadata().clusterUUID(), + blobStoreRepository.getCompressor() + ), + listener + ) + ); + } + if (uploadHashesOfConsistentSettings) { + uploadTasks.put( + HASHES_OF_CONSISTENT_SETTINGS, + remoteGlobalMetadataManager.getAsyncMetadataWriteAction( + new RemoteHashesOfConsistentSettings( + (DiffableStringMap) clusterState.metadata().hashesOfConsistentSettings(), + clusterState.metadata().version(), + clusterState.metadata().clusterUUID(), + blobStoreRepository.getCompressor() + ), listener ) ); @@ -543,13 +623,43 @@ private UploadedMetadataResults writeMetadataInParallel( String customComponent = String.join(CUSTOM_DELIMITER, CUSTOM_METADATA, key); uploadTasks.put( customComponent, - getAsyncMetadataWriteAction(clusterState, customComponent, CUSTOM_METADATA_FORMAT, value, listener) + remoteGlobalMetadataManager.getAsyncMetadataWriteAction( + new RemoteCustomMetadata( + value, + key, + clusterState.metadata().version(), + clusterState.metadata().clusterUUID(), + blobStoreRepository.getCompressor(), + namedWriteableRegistry + ), + listener + ) ); }); indexToUpload.forEach(indexMetadata -> { - uploadTasks.put(indexMetadata.getIndex().getName(), getIndexMetadataAsyncAction(clusterState, indexMetadata, listener)); + uploadTasks.put( + indexMetadata.getIndex().getName(), + remoteIndexMetadataManager.getAsyncIndexMetadataWriteAction(indexMetadata, clusterState.metadata().clusterUUID(), listener) + ); }); + clusterStateCustomToUpload.forEach((key, value) -> { + uploadTasks.put( + key, + remoteClusterStateAttributesManager.getAsyncMetadataWriteAction( + CLUSTER_STATE_CUSTOM, + new RemoteClusterStateCustoms( + value, + key, + clusterState.version(), + clusterState.metadata().clusterUUID(), + blobStoreRepository.getCompressor(), + namedWriteableRegistry + ), + listener + ) + ); + }); indicesRoutingToUpload.forEach(indexRoutingTable -> { uploadTasks.put( InternalRemoteRoutingTableService.INDEX_ROUTING_METADATA_PREFIX + indexRoutingTable.getIndex().getName(), @@ -557,7 +667,11 @@ private UploadedMetadataResults writeMetadataInParallel( clusterState, indexRoutingTable, listener, - getCusterMetadataBasePath(clusterState.getClusterName().value(), clusterState.metadata().clusterUUID()) + getClusterMetadataBasePath( + blobStoreRepository, + clusterState.getClusterName().value(), + clusterState.metadata().clusterUUID() + ) ) ); }); @@ -566,11 +680,10 @@ private UploadedMetadataResults writeMetadataInParallel( for (CheckedRunnable uploadTask : uploadTasks.values()) { uploadTask.run(); } - invokeIndexMetadataUploadListeners(indexToUpload, prevIndexMetadataByName, latch, exceptionList); try { - if (latch.await(getGlobalMetadataUploadTimeout().millis(), TimeUnit.MILLISECONDS) == false) { + if (latch.await(remoteGlobalMetadataManager.getGlobalMetadataUploadTimeout().millis(), TimeUnit.MILLISECONDS) == false) { // TODO: We should add metrics where transfer is timing out. 
[Issue: #10687] RemoteStateTransferException ex = new RemoteStateTransferException( String.format( @@ -611,25 +724,40 @@ private UploadedMetadataResults writeMetadataInParallel( if (uploadedMetadata.getClass().equals(UploadedIndexMetadata.class) && uploadedMetadata.getComponent().contains(InternalRemoteRoutingTableService.INDEX_ROUTING_METADATA_PREFIX)) { response.uploadedIndicesRoutingMetadata.add((UploadedIndexMetadata) uploadedMetadata); - } else if (name.contains(CUSTOM_METADATA)) { + } else if (name.startsWith(CUSTOM_METADATA)) { // component name for custom metadata will look like custom-- String custom = name.split(DELIMITER)[0].split(CUSTOM_DELIMITER)[1]; response.uploadedCustomMetadataMap.put( custom, new UploadedMetadataAttribute(custom, uploadedMetadata.getUploadedFilename()) ); + } else if (name.startsWith(CLUSTER_STATE_CUSTOM)) { + String custom = name.split(DELIMITER)[0].split(CUSTOM_DELIMITER)[1]; + response.uploadedClusterStateCustomMetadataMap.put( + custom, + new UploadedMetadataAttribute(custom, uploadedMetadata.getUploadedFilename()) + ); } else if (COORDINATION_METADATA.equals(name)) { response.uploadedCoordinationMetadata = (UploadedMetadataAttribute) uploadedMetadata; - } else if (SETTING_METADATA.equals(name)) { + } else if (RemotePersistentSettingsMetadata.SETTING_METADATA.equals(name)) { response.uploadedSettingsMetadata = (UploadedMetadataAttribute) uploadedMetadata; } else if (TEMPLATES_METADATA.equals(name)) { response.uploadedTemplatesMetadata = (UploadedMetadataAttribute) uploadedMetadata; } else if (name.contains(UploadedIndexMetadata.COMPONENT_PREFIX)) { response.uploadedIndexMetadata.add((UploadedIndexMetadata) uploadedMetadata); + } else if (RemoteTransientSettingsMetadata.TRANSIENT_SETTING_METADATA.equals(name)) { + response.uploadedTransientSettingsMetadata = (UploadedMetadataAttribute) uploadedMetadata; + } else if (RemoteDiscoveryNodes.DISCOVERY_NODES.equals(uploadedMetadata.getComponent())) { + response.uploadedDiscoveryNodes = (UploadedMetadataAttribute) uploadedMetadata; + } else if (RemoteClusterBlocks.CLUSTER_BLOCKS.equals(uploadedMetadata.getComponent())) { + response.uploadedClusterBlocks = (UploadedMetadataAttribute) uploadedMetadata; + } else if (RemoteHashesOfConsistentSettings.HASHES_OF_CONSISTENT_SETTINGS.equals(uploadedMetadata.getComponent())) { + response.uploadedHashesOfConsistentSettings = (UploadedMetadataAttribute) uploadedMetadata; } else { throw new IllegalStateException("Unknown metadata component name " + name); } }); + logger.trace("response {}", response.uploadedIndicesRoutingMetadata.toString()); return response; } @@ -690,73 +818,8 @@ private ActionListener getIndexMetadataUploadActionListener( ); } - /** - * Allows async Upload of IndexMetadata to remote - * - * @param clusterState current ClusterState - * @param indexMetadata {@link IndexMetadata} to upload - * @param latchedActionListener listener to respond back on after upload finishes - */ - private CheckedRunnable getIndexMetadataAsyncAction( - ClusterState clusterState, - IndexMetadata indexMetadata, - LatchedActionListener latchedActionListener - ) { - final BlobContainer indexMetadataContainer = indexMetadataContainer( - clusterState.getClusterName().value(), - clusterState.metadata().clusterUUID(), - indexMetadata.getIndexUUID() - ); - final String indexMetadataFilename = indexMetadataFileName(indexMetadata); - ActionListener completionListener = ActionListener.wrap( - resp -> latchedActionListener.onResponse( - new UploadedIndexMetadata( - 
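
// Aside: writeMetadataInParallel follows a latch-per-task fan-out: size a CountDownLatch to the exact
// task count, kick off every upload, then bound the wait and surface collected failures. A condensed,
// self-contained sketch of the pattern (the executor and all names here are illustrative, not part of this patch):

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ParallelUploadSketch {
    public static void main(String[] args) throws Exception {
        List<String> components = List.of("coordination-metadata", "settings-metadata", "templates-metadata");
        // One latch count per upload task; every task must count down exactly once.
        CountDownLatch latch = new CountDownLatch(components.size());
        List<Exception> exceptions = Collections.synchronizedList(new ArrayList<>());
        List<String> results = Collections.synchronizedList(new ArrayList<>());
        ExecutorService executor = Executors.newFixedThreadPool(3);
        for (String component : components) {
            executor.submit(() -> {
                try {
                    results.add(component + "__uploaded"); // stand-in for the remote blob write
                } catch (Exception e) {
                    exceptions.add(e);
                } finally {
                    latch.countDown(); // mirrors LatchedActionListener: fires on success or failure
                }
            });
        }
        executor.shutdown();
        // Bounded wait, as with the global metadata upload timeout; on timeout,
        // every collected failure is attached as a suppressed exception.
        if (latch.await(20, TimeUnit.SECONDS) == false) {
            RuntimeException timeout = new RuntimeException("Timed out waiting for transfer");
            exceptions.forEach(timeout::addSuppressed);
            throw timeout;
        }
        System.out.println(results);
    }
}
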
indexMetadata.getIndex().getName(), - indexMetadata.getIndexUUID(), - indexMetadataContainer.path().buildAsString() + indexMetadataFilename - ) - ), - ex -> latchedActionListener.onFailure(new RemoteStateTransferException(indexMetadata.getIndex().toString(), ex)) - ); - - return () -> INDEX_METADATA_FORMAT.writeAsyncWithUrgentPriority( - indexMetadata, - indexMetadataContainer, - indexMetadataFilename, - blobStoreRepository.getCompressor(), - completionListener, - FORMAT_PARAMS - ); - } - - /** - * Allows async upload of Metadata components to remote - */ - - private CheckedRunnable getAsyncMetadataWriteAction( - ClusterState clusterState, - String component, - ChecksumBlobStoreFormat componentMetadataBlobStore, - ToXContent componentMetadata, - LatchedActionListener latchedActionListener - ) { - final BlobContainer globalMetadataContainer = globalMetadataContainer( - clusterState.getClusterName().value(), - clusterState.metadata().clusterUUID() - ); - final String componentMetadataFilename = metadataAttributeFileName(component, clusterState.metadata().version()); - ActionListener completionListener = ActionListener.wrap( - resp -> latchedActionListener.onResponse(new UploadedMetadataAttribute(component, componentMetadataFilename)), - ex -> latchedActionListener.onFailure(new RemoteStateTransferException(component, ex)) - ); - return () -> componentMetadataBlobStore.writeAsyncWithUrgentPriority( - componentMetadata, - globalMetadataContainer, - componentMetadataFilename, - blobStoreRepository.getCompressor(), - completionListener, - FORMAT_PARAMS - ); + public RemoteManifestManager getRemoteManifestManager() { + return remoteManifestManager; } public RemoteClusterStateCleanupManager getCleanupManager() { @@ -772,15 +835,25 @@ public RemoteClusterStateManifestInfo markLastStateAsCommitted(ClusterState clus return null; } assert previousManifest != null : "Last cluster metadata manifest is not set"; - RemoteClusterStateManifestInfo committedManifestDetails = uploadManifest( - clusterState, + UploadedMetadataResults uploadedMetadataResults = new UploadedMetadataResults( previousManifest.getIndices(), - previousManifest.getPreviousClusterUUID(), + previousManifest.getCustomMetadataMap(), previousManifest.getCoordinationMetadata(), previousManifest.getSettingsMetadata(), previousManifest.getTemplatesMetadata(), - previousManifest.getCustomMetadataMap(), + previousManifest.getTransientSettingsMetadata(), + previousManifest.getDiscoveryNodesMetadata(), + previousManifest.getClusterBlocksMetadata(), previousManifest.getIndicesRouting(), + previousManifest.getHashesOfConsistentSettings(), + previousManifest.getClusterStateCustomMap() + ); + + RemoteClusterStateManifestInfo committedManifestDetails = remoteManifestManager.uploadManifest( + clusterState, + uploadedMetadataResults, + previousManifest.getPreviousClusterUUID(), + previousManifest.getDiffManifest(), true ); if (!previousManifest.isClusterUUIDCommitted() && committedManifestDetails.getClusterMetadataManifest().isClusterUUIDCommitted()) { @@ -790,6 +863,21 @@ public RemoteClusterStateManifestInfo markLastStateAsCommitted(ClusterState clus return committedManifestDetails; } + /** + * Fetch latest ClusterMetadataManifest from remote state store + * + * @param clusterUUID uuid of cluster state to refer to in remote + * @param clusterName name of the cluster + * @return ClusterMetadataManifest + */ + public Optional getLatestClusterMetadataManifest(String clusterName, String clusterUUID) { + return 
remoteManifestManager.getLatestClusterMetadataManifest(clusterName, clusterUUID); + } + + public ClusterMetadataManifest getClusterMetadataManifestByFileName(String clusterUUID, String fileName) { + return remoteManifestManager.getRemoteClusterMetadataManifestByFileName(clusterUUID, fileName); + } + @Override public void close() throws IOException { remoteClusterStateCleanupManager.close(); @@ -808,325 +896,375 @@ public void start() { final Repository repository = repositoriesService.get().repository(remoteStoreRepo); assert repository instanceof BlobStoreRepository : "Repository should be instance of BlobStoreRepository"; blobStoreRepository = (BlobStoreRepository) repository; - remoteClusterStateCleanupManager.start(); this.remoteRoutingTableService.start(); - } - - private RemoteClusterStateManifestInfo uploadManifest( - ClusterState clusterState, - List uploadedIndexMetadata, - String previousClusterUUID, - UploadedMetadataAttribute uploadedCoordinationMetadata, - UploadedMetadataAttribute uploadedSettingsMetadata, - UploadedMetadataAttribute uploadedTemplatesMetadata, - Map uploadedCustomMetadataMap, - List uploadedIndicesRouting, - boolean committed - ) throws IOException { - synchronized (this) { - final String manifestFileName = getManifestFileName( - clusterState.term(), - clusterState.version(), - committed, - MANIFEST_CURRENT_CODEC_VERSION - ); - final ClusterMetadataManifest manifest = ClusterMetadataManifest.builder() - .clusterTerm(clusterState.term()) - .stateVersion(clusterState.getVersion()) - .clusterUUID(clusterState.metadata().clusterUUID()) - .stateUUID(clusterState.stateUUID()) - .opensearchVersion(Version.CURRENT) - .nodeId(nodeId) - .committed(committed) - .codecVersion(MANIFEST_CURRENT_CODEC_VERSION) - .indices(uploadedIndexMetadata) - .previousClusterUUID(previousClusterUUID) - .clusterUUIDCommitted(clusterState.metadata().clusterUUIDCommitted()) - .coordinationMetadata(uploadedCoordinationMetadata) - .settingMetadata(uploadedSettingsMetadata) - .templatesMetadata(uploadedTemplatesMetadata) - .customMetadataMap(uploadedCustomMetadataMap) - .routingTableVersion(clusterState.routingTable().version()) - .indicesRouting(uploadedIndicesRouting) - .build(); - writeMetadataManifest(clusterState.getClusterName().value(), clusterState.metadata().clusterUUID(), manifest, manifestFileName); - return new RemoteClusterStateManifestInfo(manifest, manifestFileName); - } - } - - private void writeMetadataManifest(String clusterName, String clusterUUID, ClusterMetadataManifest uploadManifest, String fileName) - throws IOException { - AtomicReference result = new AtomicReference(); - AtomicReference exceptionReference = new AtomicReference(); - - final BlobContainer metadataManifestContainer = manifestContainer(clusterName, clusterUUID); - - // latch to wait until upload is not finished - CountDownLatch latch = new CountDownLatch(1); - - LatchedActionListener completionListener = new LatchedActionListener<>(ActionListener.wrap(resp -> { - logger.trace(String.format(Locale.ROOT, "Manifest file uploaded successfully.")); - }, ex -> { exceptionReference.set(ex); }), latch); - - getClusterMetadataManifestBlobStoreFormat(fileName).writeAsyncWithUrgentPriority( - uploadManifest, - metadataManifestContainer, - fileName, - blobStoreRepository.getCompressor(), - completionListener, - FORMAT_PARAMS + blobStoreTransferService = new BlobStoreTransferService(getBlobStore(), threadpool); + String clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings).value(); + + 
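
// Aside: markLastStateAsCommitted above rebuilds UploadedMetadataResults from the previous manifest and
// re-uploads only the manifest with the committed flag set, so the commit phase never rewrites metadata blobs.
// A minimal sketch of that publish-then-commit idea, using a hypothetical record in place of ClusterMetadataManifest:

import java.util.List;

public class TwoPhaseManifestSketch {
    // Hypothetical stand-in: the referenced files are already uploaded; only the
    // committed marker changes between the publish (P) and commit (C) phases.
    record Manifest(long term, long version, boolean committed, List<String> uploadedFiles) {
        Manifest asCommitted() {
            return new Manifest(term, version, true, uploadedFiles);
        }
    }

    public static void main(String[] args) {
        Manifest published = new Manifest(3, 42, false, List.of("index-foo__file", "settings__file"));
        // Commit reuses every uploaded file reference; no blob is written twice.
        Manifest committed = published.asCommitted();
        System.out.println(published);
        System.out.println(committed);
    }
}
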
remoteGlobalMetadataManager = new RemoteGlobalMetadataManager( + clusterSettings, + clusterName, + blobStoreRepository, + blobStoreTransferService, + namedWriteableRegistry, + threadpool ); - - try { - if (latch.await(getMetadataManifestUploadTimeout().millis(), TimeUnit.MILLISECONDS) == false) { - RemoteStateTransferException ex = new RemoteStateTransferException( - String.format(Locale.ROOT, "Timed out waiting for transfer of manifest file to complete") - ); - throw ex; - } - } catch (InterruptedException ex) { - RemoteStateTransferException exception = new RemoteStateTransferException( - String.format(Locale.ROOT, "Timed out waiting for transfer of manifest file to complete - %s"), - ex - ); - Thread.currentThread().interrupt(); - throw exception; - } - if (exceptionReference.get() != null) { - throw new RemoteStateTransferException(exceptionReference.get().getMessage(), exceptionReference.get()); - } - logger.debug( - "Metadata manifest file [{}] written during [{}] phase. ", - fileName, - uploadManifest.isCommitted() ? "commit" : "publish" + remoteIndexMetadataManager = new RemoteIndexMetadataManager( + clusterSettings, + clusterName, + blobStoreRepository, + blobStoreTransferService, + threadpool ); + remoteManifestManager = new RemoteManifestManager( + clusterSettings, + clusterName, + nodeId, + blobStoreRepository, + blobStoreTransferService, + threadpool + ); + remoteClusterStateAttributesManager = new RemoteClusterStateAttributesManager( + clusterName, + blobStoreRepository, + blobStoreTransferService, + namedWriteableRegistry, + threadpool + ); + remoteClusterStateCleanupManager.start(); } - ThreadPool getThreadpool() { - return threadpool; - } - - BlobStore getBlobStore() { - return blobStoreRepository.blobStore(); + private void setSlowWriteLoggingThreshold(TimeValue slowWriteLoggingThreshold) { + this.slowWriteLoggingThreshold = slowWriteLoggingThreshold; } - private BlobContainer indexMetadataContainer(String clusterName, String clusterUUID, String indexUUID) { - // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/index/ftqsCnn9TgOX - return blobStoreRepository.blobStore() - .blobContainer(getCusterMetadataBasePath(clusterName, clusterUUID).add(INDEX_PATH_TOKEN).add(indexUUID)); + // Package private for unit test + RemoteRoutingTableService getRemoteRoutingTableService() { + return this.remoteRoutingTableService; } - private BlobContainer globalMetadataContainer(String clusterName, String clusterUUID) { - // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/global-metadata/ - return blobStoreRepository.blobStore() - .blobContainer(getCusterMetadataBasePath(clusterName, clusterUUID).add(GLOBAL_METADATA_PATH_TOKEN)); + ThreadPool getThreadpool() { + return threadpool; } - private BlobContainer manifestContainer(String clusterName, String clusterUUID) { - // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/manifest - return blobStoreRepository.blobStore().blobContainer(getManifestFolderPath(clusterName, clusterUUID)); + BlobStoreRepository getBlobStoreRepository() { + return blobStoreRepository; } - BlobPath getCusterMetadataBasePath(String clusterName, String clusterUUID) { - return blobStoreRepository.basePath().add(encodeString(clusterName)).add(CLUSTER_STATE_PATH_TOKEN).add(clusterUUID); + BlobStore getBlobStore() { + return blobStoreRepository.blobStore(); } - private BlobContainer clusterUUIDContainer(String clusterName) { - return blobStoreRepository.blobStore() - .blobContainer( - blobStoreRepository.basePath() - 
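
// Aside: the container helpers removed in this hunk document the blob layout in their comments,
// e.g. 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/index/ftqsCnn9TgOX, with the cluster
// name Base64-URL encoded via encodeString. A runnable sketch of that path scheme (sample names
// taken from those comments; String.join is a stand-in for BlobPath):

import java.nio.charset.StandardCharsets;
import java.util.Base64;

public class ClusterStatePathSketch {
    // URL-safe Base64 without padding, as in RemoteClusterStateUtils.encodeString
    static String encodeString(String content) {
        return Base64.getUrlEncoder().withoutPadding().encodeToString(content.getBytes(StandardCharsets.UTF_8));
    }

    public static void main(String[] args) {
        String clusterName = "123456789012_test-cluster";
        String clusterUUID = "dsgYj10Nkso7";
        // <base-path>/<encoded-cluster-name>/cluster-state/<cluster-uuid>/<component>
        String base = String.join("/", encodeString(clusterName), "cluster-state", clusterUUID);
        System.out.println(base + "/index/ftqsCnn9TgOX"); // per-index metadata container
        System.out.println(base + "/global-metadata");    // global metadata container
        System.out.println(base + "/manifest");           // manifest container
    }
}
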
.add(Base64.getUrlEncoder().withoutPadding().encodeToString(clusterName.getBytes(StandardCharsets.UTF_8))) - .add(CLUSTER_STATE_PATH_TOKEN) + /** + * Fetch latest ClusterState from remote, including global metadata, index metadata and cluster state version + * + * @param clusterUUID uuid of cluster state to refer to in remote + * @param clusterName name of the cluster + * @param includeEphemeral whether to read the ephemeral cluster state attributes as well + * @return latest {@link ClusterState} + */ + public ClusterState getLatestClusterState(String clusterName, String clusterUUID, boolean includeEphemeral) throws IOException { + Optional<ClusterMetadataManifest> clusterMetadataManifest = remoteManifestManager.getLatestClusterMetadataManifest( + clusterName, + clusterUUID + ); + if (clusterMetadataManifest.isEmpty()) { + throw new IllegalStateException( + String.format(Locale.ROOT, "Latest cluster metadata manifest is not present for the provided clusterUUID: %s", clusterUUID) ); - } - - private void setSlowWriteLoggingThreshold(TimeValue slowWriteLoggingThreshold) { - this.slowWriteLoggingThreshold = slowWriteLoggingThreshold; - } + } - private void setIndexMetadataUploadTimeout(TimeValue newIndexMetadataUploadTimeout) { - this.indexMetadataUploadTimeout = newIndexMetadataUploadTimeout; + return getClusterStateForManifest(clusterName, clusterMetadataManifest.get(), nodeId, includeEphemeral); + } - private void setGlobalMetadataUploadTimeout(TimeValue newGlobalMetadataUploadTimeout) { - this.globalMetadataUploadTimeout = newGlobalMetadataUploadTimeout; - } + private ClusterState readClusterStateInParallel( + ClusterState previousState, + ClusterMetadataManifest manifest, + String clusterUUID, + String localNodeId, + List<UploadedIndexMetadata> indicesToRead, + Map<String, UploadedMetadataAttribute> customToRead, + boolean readCoordinationMetadata, + boolean readSettingsMetadata, + boolean readTransientSettingsMetadata, + boolean readTemplatesMetadata, + boolean readDiscoveryNodes, + boolean readClusterBlocks, + List<UploadedIndexMetadata> indicesRoutingToRead, + boolean readHashesOfConsistentSettings, + Map<String, UploadedMetadataAttribute> clusterStateCustomToRead, + boolean includeEphemeral + ) throws IOException { + int totalReadTasks = indicesToRead.size() + customToRead.size() + (readCoordinationMetadata ? 1 : 0) + (readSettingsMetadata + ? 1 + : 0) + (readTemplatesMetadata ? 1 : 0) + (readDiscoveryNodes ? 1 : 0) + (readClusterBlocks ? 1 : 0) + + (readTransientSettingsMetadata ? 1 : 0) + (readHashesOfConsistentSettings ? 
1 : 0) + clusterStateCustomToRead.size() + + indicesRoutingToRead.size(); + CountDownLatch latch = new CountDownLatch(totalReadTasks); + List> asyncMetadataReadActions = new ArrayList<>(); + List readResults = Collections.synchronizedList(new ArrayList<>()); + List readIndexRoutingTableResults = Collections.synchronizedList(new ArrayList<>()); + List exceptionList = Collections.synchronizedList(new ArrayList<>(totalReadTasks)); + + LatchedActionListener listener = new LatchedActionListener<>(ActionListener.wrap(response -> { + logger.debug("Successfully read cluster state component from remote"); + readResults.add(response); + }, ex -> { + logger.error("Failed to read cluster state from remote", ex); + exceptionList.add(ex); + }), latch); + + for (UploadedIndexMetadata indexMetadata : indicesToRead) { + asyncMetadataReadActions.add( + remoteIndexMetadataManager.getAsyncIndexMetadataReadAction(clusterUUID, indexMetadata.getUploadedFilename(), listener) + ); + } - private void setMetadataManifestUploadTimeout(TimeValue newMetadataManifestUploadTimeout) { - this.metadataManifestUploadTimeout = newMetadataManifestUploadTimeout; - } + LatchedActionListener routingTableLatchedActionListener = new LatchedActionListener<>( + ActionListener.wrap(response -> { + logger.debug("Successfully read cluster state component from remote"); + readIndexRoutingTableResults.add(response); + }, ex -> { + logger.error("Failed to read cluster state from remote", ex); + exceptionList.add(ex); + }), + latch + ); - private Map getUpdatedCustoms(ClusterState currentState, ClusterState previousState) { - if (Metadata.isCustomMetadataEqual(previousState.metadata(), currentState.metadata())) { - return new HashMap<>(); - } - Map updatedCustom = new HashMap<>(); - Set currentCustoms = new HashSet<>(currentState.metadata().customs().keySet()); - for (Map.Entry cursor : previousState.metadata().customs().entrySet()) { - if (cursor.getValue().context().contains(Metadata.XContentContext.GATEWAY)) { - if (currentCustoms.contains(cursor.getKey()) - && !cursor.getValue().equals(currentState.metadata().custom(cursor.getKey()))) { - // If the custom metadata is updated, we need to upload the new version. 
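
// Aside: getUpdatedCustoms, removed in this hunk (its logic is relocated alongside the other
// global-metadata helpers in this refactor), re-uploads a custom only when it changed or is newly
// added, and only for GATEWAY-context customs; deletions are handled elsewhere. The set arithmetic,
// sketched in plain Java with strings standing in for Metadata.Custom values:

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class UpdatedCustomsSketch {
    public static void main(String[] args) {
        Map<String, String> previous = Map.of("repositories", "v1", "index-graveyard", "v1");
        Map<String, String> current = Map.of("repositories", "v2", "ingest", "v1");

        Map<String, String> updated = new HashMap<>();
        Set<String> currentKeys = new HashSet<>(current.keySet());
        // Pass 1: anything present before and changed now must be re-uploaded.
        for (Map.Entry<String, String> entry : previous.entrySet()) {
            if (currentKeys.contains(entry.getKey()) && !entry.getValue().equals(current.get(entry.getKey()))) {
                updated.put(entry.getKey(), current.get(entry.getKey()));
            }
            currentKeys.remove(entry.getKey());
        }
        // Pass 2: whatever keys survive were never seen before, i.e. newly added customs.
        for (String added : currentKeys) {
            updated.put(added, current.get(added));
        }
        System.out.println(updated); // {repositories=v2, ingest=v1} (order may vary)
    }
}
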
- updatedCustom.put(cursor.getKey(), currentState.metadata().custom(cursor.getKey())); - } - currentCustoms.remove(cursor.getKey()); - } - } - for (String custom : currentCustoms) { - Metadata.Custom cursor = currentState.metadata().custom(custom); - if (cursor.context().contains(Metadata.XContentContext.GATEWAY)) { - updatedCustom.put(custom, cursor); - } + for (UploadedIndexMetadata indexRouting : indicesRoutingToRead) { + asyncMetadataReadActions.add( + remoteRoutingTableService.getAsyncIndexRoutingReadAction( + indexRouting.getUploadedFilename(), + new Index(indexRouting.getIndexName(), indexRouting.getIndexUUID()), + routingTableLatchedActionListener + ) + ); } - return updatedCustom; - } - public TimeValue getIndexMetadataUploadTimeout() { - return this.indexMetadataUploadTimeout; - } + for (Map.Entry entry : customToRead.entrySet()) { + asyncMetadataReadActions.add( + remoteGlobalMetadataManager.getAsyncMetadataReadAction( + new RemoteCustomMetadata( + entry.getValue().getUploadedFilename(), + entry.getKey(), + clusterUUID, + blobStoreRepository.getCompressor(), + namedWriteableRegistry + ), + entry.getValue().getAttributeName(), + listener + ) + ); + } - public TimeValue getGlobalMetadataUploadTimeout() { - return this.globalMetadataUploadTimeout; - } + if (readCoordinationMetadata) { + asyncMetadataReadActions.add( + remoteGlobalMetadataManager.getAsyncMetadataReadAction( + new RemoteCoordinationMetadata( + manifest.getCoordinationMetadata().getUploadedFilename(), + clusterUUID, + blobStoreRepository.getCompressor(), + blobStoreRepository.getNamedXContentRegistry() + ), + COORDINATION_METADATA, + listener + ) + ); + } - public TimeValue getMetadataManifestUploadTimeout() { - return this.metadataManifestUploadTimeout; - } + if (readSettingsMetadata) { + asyncMetadataReadActions.add( + remoteGlobalMetadataManager.getAsyncMetadataReadAction( + new RemotePersistentSettingsMetadata( + manifest.getSettingsMetadata().getUploadedFilename(), + clusterUUID, + blobStoreRepository.getCompressor(), + blobStoreRepository.getNamedXContentRegistry() + ), + SETTING_METADATA, + listener + ) + ); + } - // Package private for unit test - RemoteRoutingTableService getRemoteRoutingTableService() { - return this.remoteRoutingTableService; - } + if (readTransientSettingsMetadata) { + asyncMetadataReadActions.add( + remoteGlobalMetadataManager.getAsyncMetadataReadAction( + new RemoteTransientSettingsMetadata( + manifest.getTransientSettingsMetadata().getUploadedFilename(), + clusterUUID, + blobStoreRepository.getCompressor(), + blobStoreRepository.getNamedXContentRegistry() + ), + TRANSIENT_SETTING_METADATA, + listener + ) + ); + } - static String getManifestFileName(long term, long version, boolean committed, int codecVersion) { - // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/manifest/manifest______C/P____ - return String.join( - DELIMITER, - MANIFEST_PATH_TOKEN, - RemoteStoreUtils.invertLong(term), - RemoteStoreUtils.invertLong(version), - (committed ? "C" : "P"), // C for committed and P for published - RemoteStoreUtils.invertLong(System.currentTimeMillis()), - String.valueOf(codecVersion) // Keep the codec version at last place only, during read we reads last place to - // determine codec version. 
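
// Aside: the manifest file name assembled above leans on RemoteStoreUtils.invertLong so that a plain
// lexicographic listing returns the newest file first, which is exactly what the latest-manifest lookup
// exploits via listBlobsByPrefixInSortedOrder. A runnable sketch, assuming invertLong zero-pads
// Long.MAX_VALUE minus the input to a fixed width:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Locale;

public class InvertedNameSketch {
    // Assumed shape of RemoteStoreUtils.invertLong: the fixed width keeps lexicographic
    // order aligned with the numeric order of the inverted value.
    static String invertLong(long value) {
        return String.format(Locale.ROOT, "%019d", Long.MAX_VALUE - value);
    }

    public static void main(String[] args) {
        List<String> names = new ArrayList<>();
        for (long term : new long[] { 5, 6, 7 }) {
            // manifest__<inv term>__<inv version>__<C/P>__<inv timestamp>__<codec>
            names.add(String.join("__", "manifest", invertLong(term), invertLong(40), "C", invertLong(1_000 + term), "2"));
        }
        Collections.sort(names);
        // Highest term, i.e. the most recent manifest, sorts to the front.
        System.out.println(names.get(0).startsWith("manifest__" + invertLong(7))); // true
    }
}
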
- ); - } + if (readTemplatesMetadata) { + asyncMetadataReadActions.add( + remoteGlobalMetadataManager.getAsyncMetadataReadAction( + new RemoteTemplatesMetadata( + manifest.getTemplatesMetadata().getUploadedFilename(), + clusterUUID, + blobStoreRepository.getCompressor(), + blobStoreRepository.getNamedXContentRegistry() + ), + TEMPLATES_METADATA, + listener + ) + ); + } - static String indexMetadataFileName(IndexMetadata indexMetadata) { - // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/index//metadata______ - return String.join( - DELIMITER, - METADATA_FILE_PREFIX, - RemoteStoreUtils.invertLong(indexMetadata.getVersion()), - RemoteStoreUtils.invertLong(System.currentTimeMillis()), - String.valueOf(INDEX_METADATA_CURRENT_CODEC_VERSION) // Keep the codec version at last place only, during read we reads last - // place to determine codec version. - ); - } + if (readDiscoveryNodes) { + asyncMetadataReadActions.add( + remoteClusterStateAttributesManager.getAsyncMetadataReadAction( + DISCOVERY_NODES, + new RemoteDiscoveryNodes( + manifest.getDiscoveryNodesMetadata().getUploadedFilename(), + clusterUUID, + blobStoreRepository.getCompressor() + ), + listener + ) + ); + } - private static String globalMetadataFileName(Metadata metadata) { - // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/global-metadata/metadata______ - return String.join( - DELIMITER, - METADATA_FILE_PREFIX, - RemoteStoreUtils.invertLong(metadata.version()), - RemoteStoreUtils.invertLong(System.currentTimeMillis()), - String.valueOf(GLOBAL_METADATA_CURRENT_CODEC_VERSION) - ); - } + if (readClusterBlocks) { + asyncMetadataReadActions.add( + remoteClusterStateAttributesManager.getAsyncMetadataReadAction( + CLUSTER_BLOCKS, + new RemoteClusterBlocks( + manifest.getClusterBlocksMetadata().getUploadedFilename(), + clusterUUID, + blobStoreRepository.getCompressor() + ), + listener + ) + ); + } - private static String metadataAttributeFileName(String componentPrefix, Long metadataVersion) { - // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/global-metadata/______ - return String.join( - DELIMITER, - componentPrefix, - RemoteStoreUtils.invertLong(metadataVersion), - RemoteStoreUtils.invertLong(System.currentTimeMillis()), - String.valueOf(GLOBAL_METADATA_CURRENT_CODEC_VERSION) - ); - } + if (readHashesOfConsistentSettings) { + asyncMetadataReadActions.add( + remoteGlobalMetadataManager.getAsyncMetadataReadAction( + new RemoteHashesOfConsistentSettings( + manifest.getHashesOfConsistentSettings().getUploadedFilename(), + clusterUUID, + blobStoreRepository.getCompressor() + ), + HASHES_OF_CONSISTENT_SETTINGS, + listener + ) + ); + } - BlobPath getManifestFolderPath(String clusterName, String clusterUUID) { - return getCusterMetadataBasePath(clusterName, clusterUUID).add(MANIFEST_PATH_TOKEN); - } + for (Map.Entry entry : clusterStateCustomToRead.entrySet()) { + asyncMetadataReadActions.add( + remoteClusterStateAttributesManager.getAsyncMetadataReadAction( + CLUSTER_STATE_CUSTOM, + new RemoteClusterStateCustoms( + entry.getValue().getUploadedFilename(), + entry.getValue().getAttributeName(), + clusterUUID, + blobStoreRepository.getCompressor(), + namedWriteableRegistry + ), + listener + ) + ); + } - /** - * Fetch latest index metadata from remote cluster state - * - * @param clusterUUID uuid of cluster state to refer to in remote - * @param clusterName name of the cluster - * @param clusterMetadataManifest manifest file of cluster - * @return {@code Map} latest IndexUUID to IndexMetadata map - */ - private Map 
getIndexMetadataMap( - String clusterName, - String clusterUUID, - ClusterMetadataManifest clusterMetadataManifest - ) { - assert Objects.equals(clusterUUID, clusterMetadataManifest.getClusterUUID()) - : "Corrupt ClusterMetadataManifest found. Cluster UUID mismatch."; - Map remoteIndexMetadata = new HashMap<>(); - for (UploadedIndexMetadata uploadedIndexMetadata : clusterMetadataManifest.getIndices()) { - IndexMetadata indexMetadata = getIndexMetadata(clusterName, clusterUUID, uploadedIndexMetadata); - remoteIndexMetadata.put(uploadedIndexMetadata.getIndexUUID(), indexMetadata); + for (CheckedRunnable asyncMetadataReadAction : asyncMetadataReadActions) { + asyncMetadataReadAction.run(); } - return remoteIndexMetadata; - } - /** - * Fetch index metadata from remote cluster state - * - * @param clusterUUID uuid of cluster state to refer to in remote - * @param clusterName name of the cluster - * @param uploadedIndexMetadata {@link UploadedIndexMetadata} contains details about remote location of index metadata - * @return {@link IndexMetadata} - */ - private IndexMetadata getIndexMetadata(String clusterName, String clusterUUID, UploadedIndexMetadata uploadedIndexMetadata) { - BlobContainer blobContainer = indexMetadataContainer(clusterName, clusterUUID, uploadedIndexMetadata.getIndexUUID()); try { - String[] splitPath = uploadedIndexMetadata.getUploadedFilename().split("/"); - return INDEX_METADATA_FORMAT.read( - blobContainer, - splitPath[splitPath.length - 1], - blobStoreRepository.getNamedXContentRegistry() - ); - } catch (IOException e) { - throw new IllegalStateException( - String.format(Locale.ROOT, "Error while downloading IndexMetadata - %s", uploadedIndexMetadata.getUploadedFilename()), - e + if (latch.await(this.remoteStateReadTimeout.getMillis(), TimeUnit.MILLISECONDS) == false) { + RemoteStateTransferException exception = new RemoteStateTransferException( + "Timed out waiting to read cluster state from remote within timeout " + this.remoteStateReadTimeout + ); + exceptionList.forEach(exception::addSuppressed); + throw exception; + } + } catch (InterruptedException e) { + exceptionList.forEach(e::addSuppressed); + RemoteStateTransferException ex = new RemoteStateTransferException( + "Interrupted while waiting to read cluster state from metadata" ); + Thread.currentThread().interrupt(); + throw ex; } - } - /** - * Fetch latest ClusterState from remote, including global metadata, index metadata and cluster state version - * - * @param clusterUUID uuid of cluster state to refer to in remote - * @param clusterName name of the cluster - * @return {@link IndexMetadata} - */ - public ClusterState getLatestClusterState(String clusterName, String clusterUUID) { - Optional clusterMetadataManifest = getLatestClusterMetadataManifest(clusterName, clusterUUID); - if (clusterMetadataManifest.isEmpty()) { - throw new IllegalStateException( - String.format(Locale.ROOT, "Latest cluster metadata manifest is not present for the provided clusterUUID: %s", clusterUUID) - ); + if (!exceptionList.isEmpty()) { + RemoteStateTransferException exception = new RemoteStateTransferException("Exception during reading cluster state from remote"); + exceptionList.forEach(exception::addSuppressed); + throw exception; } - // Fetch Global Metadata - Metadata globalMetadata = getGlobalMetadata(clusterName, clusterUUID, clusterMetadataManifest.get()); + final ClusterState.Builder clusterStateBuilder = ClusterState.builder(previousState); + AtomicReference discoveryNodesBuilder = new 
AtomicReference<>(DiscoveryNodes.builder()); + Metadata.Builder metadataBuilder = Metadata.builder(previousState.metadata()); + metadataBuilder.version(manifest.getMetadataVersion()); + metadataBuilder.clusterUUID(manifest.getClusterUUID()); + metadataBuilder.clusterUUIDCommitted(manifest.isClusterUUIDCommitted()); + Map<String, IndexMetadata> indexMetadataMap = new HashMap<>(); + Map<String, IndexRoutingTable> indicesRouting = new HashMap<>(previousState.routingTable().getIndicesRouting()); + + readResults.forEach(remoteReadResult -> { + switch (remoteReadResult.getComponent()) { + case RemoteIndexMetadata.INDEX: + IndexMetadata indexMetadata = (IndexMetadata) remoteReadResult.getObj(); + indexMetadataMap.put(indexMetadata.getIndex().getName(), indexMetadata); + break; + case CUSTOM_METADATA: + Metadata.Custom metadataCustom = (Metadata.Custom) remoteReadResult.getObj(); + if (includeEphemeral || (!includeEphemeral && metadataCustom.context().contains(XContentContext.GATEWAY))) { + metadataBuilder.putCustom(remoteReadResult.getComponentName(), (Metadata.Custom) remoteReadResult.getObj()); + } + break; + case COORDINATION_METADATA: + metadataBuilder.coordinationMetadata((CoordinationMetadata) remoteReadResult.getObj()); + break; + case SETTING_METADATA: + metadataBuilder.persistentSettings((Settings) remoteReadResult.getObj()); + break; + case TRANSIENT_SETTING_METADATA: + metadataBuilder.transientSettings((Settings) remoteReadResult.getObj()); + break; + case TEMPLATES_METADATA: + metadataBuilder.templates((TemplatesMetadata) remoteReadResult.getObj()); + break; + case HASHES_OF_CONSISTENT_SETTINGS: + metadataBuilder.hashesOfConsistentSettings((DiffableStringMap) remoteReadResult.getObj()); + break; + case CLUSTER_STATE_ATTRIBUTE: + if (remoteReadResult.getComponentName().equals(DISCOVERY_NODES)) { + discoveryNodesBuilder.set(DiscoveryNodes.builder((DiscoveryNodes) remoteReadResult.getObj())); + } else if (remoteReadResult.getComponentName().equals(CLUSTER_BLOCKS)) { + clusterStateBuilder.blocks((ClusterBlocks) remoteReadResult.getObj()); + } else if (remoteReadResult.getComponentName().startsWith(CLUSTER_STATE_CUSTOM)) { + // component name format is "cluster-state-custom--custom_name" + String custom = remoteReadResult.getComponentName().split(CUSTOM_DELIMITER)[1]; + clusterStateBuilder.putCustom(custom, (ClusterState.Custom) remoteReadResult.getObj()); + } + break; + default: + throw new IllegalStateException("Unknown component: " + remoteReadResult.getComponent()); + } + }); - // Fetch Index Metadata - Map<String, IndexMetadata> indices = getIndexMetadataMap(clusterName, clusterUUID, clusterMetadataManifest.get()); + metadataBuilder.indices(indexMetadataMap); + if (readDiscoveryNodes) { + clusterStateBuilder.nodes(discoveryNodesBuilder.get().localNodeId(localNodeId)); + } - Map<String, IndexMetadata> indexMetadataMap = new HashMap<>(); - indices.values().forEach(indexMetadata -> { indexMetadataMap.put(indexMetadata.getIndex().getName(), indexMetadata); }); + clusterStateBuilder.metadata(metadataBuilder).version(manifest.getStateVersion()).stateUUID(manifest.getStateUUID()); - return ClusterState.builder(ClusterState.EMPTY_STATE) - .version(clusterMetadataManifest.get().getStateVersion()) - .metadata(Metadata.builder(globalMetadata).indices(indexMetadataMap).build()) - .build(); + readIndexRoutingTableResults.forEach( + indexRoutingTable -> indicesRouting.put(indexRoutingTable.getIndex().getName(), indexRoutingTable) + ); + clusterStateBuilder.routingTable(new RoutingTable(manifest.getRoutingTableVersion(), indicesRouting)); + + return clusterStateBuilder.build(); } public 
ClusterState getClusterStateForManifest( @@ -1134,172 +1272,137 @@ public ClusterState getClusterStateForManifest( ClusterMetadataManifest manifest, String localNodeId, boolean includeEphemeral - ) { - // TODO https://github.com/opensearch-project/OpenSearch/pull/14089 - return null; + ) throws IOException { + if (manifest.onOrAfterCodecVersion(CODEC_V2)) { + return readClusterStateInParallel( + ClusterState.builder(new ClusterName(clusterName)).build(), + manifest, + manifest.getClusterUUID(), + localNodeId, + manifest.getIndices(), + manifest.getCustomMetadataMap(), + manifest.getCoordinationMetadata() != null, + manifest.getSettingsMetadata() != null, + manifest.getTransientSettingsMetadata() != null, + manifest.getTemplatesMetadata() != null, + includeEphemeral && manifest.getDiscoveryNodesMetadata() != null, + includeEphemeral && manifest.getClusterBlocksMetadata() != null, + includeEphemeral ? manifest.getIndicesRouting() : emptyList(), + includeEphemeral && manifest.getHashesOfConsistentSettings() != null, + includeEphemeral ? manifest.getClusterStateCustomMap() : emptyMap(), + includeEphemeral + ); + } else { + ClusterState clusterState = readClusterStateInParallel( + ClusterState.builder(new ClusterName(clusterName)).build(), + manifest, + manifest.getClusterUUID(), + localNodeId, + manifest.getIndices(), + // for manifest codec V1, we don't have the following objects to read, so not passing anything + emptyMap(), + false, + false, + false, + false, + false, + false, + emptyList(), + false, + emptyMap(), + false + ); + Metadata.Builder mb = Metadata.builder(remoteGlobalMetadataManager.getGlobalMetadata(manifest.getClusterUUID(), manifest)); + mb.indices(clusterState.metadata().indices()); + return ClusterState.builder(clusterState).metadata(mb).build(); + } + } public ClusterState getClusterStateUsingDiff( String clusterName, ClusterMetadataManifest manifest, - ClusterState previousClusterState, + ClusterState previousState, String localNodeId - ) { - // TODO https://github.com/opensearch-project/OpenSearch/pull/14089 - return null; - } - - public ClusterMetadataManifest getClusterMetadataManifestByFileName(String clusterUUID, String manifestFileName) { - // TODO https://github.com/opensearch-project/OpenSearch/pull/14089 - return null; - } - - private Metadata getGlobalMetadata(String clusterName, String clusterUUID, ClusterMetadataManifest clusterMetadataManifest) { - String globalMetadataFileName = clusterMetadataManifest.getGlobalMetadataFileName(); - try { - // Fetch Global metadata - if (globalMetadataFileName != null) { - String[] splitPath = globalMetadataFileName.split("/"); - return GLOBAL_METADATA_FORMAT.read( - globalMetadataContainer(clusterName, clusterUUID), - splitPath[splitPath.length - 1], - blobStoreRepository.getNamedXContentRegistry() - ); - } else if (clusterMetadataManifest.hasMetadataAttributesFiles()) { - CoordinationMetadata coordinationMetadata = getCoordinationMetadata( - clusterName, - clusterUUID, - clusterMetadataManifest.getCoordinationMetadata().getUploadedFilename() - ); - Settings settingsMetadata = getSettingsMetadata( - clusterName, - clusterUUID, - clusterMetadataManifest.getSettingsMetadata().getUploadedFilename() - ); - TemplatesMetadata templatesMetadata = getTemplatesMetadata( - clusterName, - clusterUUID, - clusterMetadataManifest.getTemplatesMetadata().getUploadedFilename() - ); - Metadata.Builder builder = new Metadata.Builder(); - builder.coordinationMetadata(coordinationMetadata); - builder.persistentSettings(settingsMetadata); - 
builder.templates(templatesMetadata); - clusterMetadataManifest.getCustomMetadataMap() - .forEach( - (key, value) -> builder.putCustom( - key, - getCustomsMetadata(clusterName, clusterUUID, value.getUploadedFilename(), key) - ) - ); - return builder.build(); - } else { - return Metadata.EMPTY_METADATA; + ) throws IOException { + assert manifest.getDiffManifest() != null; + ClusterStateDiffManifest diff = manifest.getDiffManifest(); + List updatedIndices = diff.getIndicesUpdated().stream().map(idx -> { + Optional uploadedIndexMetadataOptional = manifest.getIndices() + .stream() + .filter(idx2 -> idx2.getIndexName().equals(idx)) + .findFirst(); + assert uploadedIndexMetadataOptional.isPresent() == true; + return uploadedIndexMetadataOptional.get(); + }).collect(Collectors.toList()); + + Map updatedCustomMetadata = new HashMap<>(); + if (diff.getCustomMetadataUpdated() != null) { + for (String customType : diff.getCustomMetadataUpdated()) { + updatedCustomMetadata.put(customType, manifest.getCustomMetadataMap().get(customType)); } - } catch (IOException e) { - throw new IllegalStateException( - String.format(Locale.ROOT, "Error while downloading Global Metadata - %s", globalMetadataFileName), - e - ); } - } - - private CoordinationMetadata getCoordinationMetadata(String clusterName, String clusterUUID, String coordinationMetadataFileName) { - try { - // Fetch Coordination metadata - if (coordinationMetadataFileName != null) { - String[] splitPath = coordinationMetadataFileName.split("/"); - return COORDINATION_METADATA_FORMAT.read( - globalMetadataContainer(clusterName, clusterUUID), - splitPath[splitPath.length - 1], - blobStoreRepository.getNamedXContentRegistry() - ); - } else { - return CoordinationMetadata.EMPTY_METADATA; + Map updatedClusterStateCustom = new HashMap<>(); + if (diff.getClusterStateCustomUpdated() != null) { + for (String customType : diff.getClusterStateCustomUpdated()) { + updatedClusterStateCustom.put(customType, manifest.getClusterStateCustomMap().get(customType)); } - } catch (IOException e) { - throw new IllegalStateException( - String.format(Locale.ROOT, "Error while downloading Coordination Metadata - %s", coordinationMetadataFileName), - e - ); } - } - private Settings getSettingsMetadata(String clusterName, String clusterUUID, String settingsMetadataFileName) { - try { - // Fetch Settings metadata - if (settingsMetadataFileName != null) { - String[] splitPath = settingsMetadataFileName.split("/"); - return SETTINGS_METADATA_FORMAT.read( - globalMetadataContainer(clusterName, clusterUUID), - splitPath[splitPath.length - 1], - blobStoreRepository.getNamedXContentRegistry() - ); - } else { - return Settings.EMPTY; + List updatedIndexRouting = new ArrayList<>(); + updatedIndexRouting.addAll( + remoteRoutingTableService.getUpdatedIndexRoutingTableMetadata(diff.getIndicesRoutingUpdated(), manifest.getIndicesRouting()) + ); + + ClusterState updatedClusterState = readClusterStateInParallel( + previousState, + manifest, + manifest.getClusterUUID(), + localNodeId, + updatedIndices, + updatedCustomMetadata, + diff.isCoordinationMetadataUpdated(), + diff.isSettingsMetadataUpdated(), + diff.isTransientSettingsMetadataUpdated(), + diff.isTemplatesMetadataUpdated(), + diff.isDiscoveryNodesUpdated(), + diff.isClusterBlocksUpdated(), + updatedIndexRouting, + diff.isHashesOfConsistentSettingsUpdated(), + updatedClusterStateCustom, + true + ); + ClusterState.Builder clusterStateBuilder = ClusterState.builder(updatedClusterState); + Metadata.Builder metadataBuilder = 
Metadata.builder(updatedClusterState.metadata()); + // remove the deleted indices from the metadata + for (String index : diff.getIndicesDeleted()) { + metadataBuilder.remove(index); + } + // remove the deleted metadata customs from the metadata + if (diff.getCustomMetadataDeleted() != null) { + for (String customType : diff.getCustomMetadataDeleted()) { + metadataBuilder.removeCustom(customType); } - } catch (IOException e) { - throw new IllegalStateException( - String.format(Locale.ROOT, "Error while downloading Settings Metadata - %s", settingsMetadataFileName), - e - ); } - } - private TemplatesMetadata getTemplatesMetadata(String clusterName, String clusterUUID, String templatesMetadataFileName) { - try { - // Fetch Templates metadata - if (templatesMetadataFileName != null) { - String[] splitPath = templatesMetadataFileName.split("/"); - return TEMPLATES_METADATA_FORMAT.read( - globalMetadataContainer(clusterName, clusterUUID), - splitPath[splitPath.length - 1], - blobStoreRepository.getNamedXContentRegistry() - ); - } else { - return TemplatesMetadata.EMPTY_METADATA; + // remove the deleted cluster state customs from the metadata + if (diff.getClusterStateCustomDeleted() != null) { + for (String customType : diff.getClusterStateCustomDeleted()) { + clusterStateBuilder.removeCustom(customType); } - } catch (IOException e) { - throw new IllegalStateException( - String.format(Locale.ROOT, "Error while downloading Templates Metadata - %s", templatesMetadataFileName), - e - ); } - } - private Metadata.Custom getCustomsMetadata(String clusterName, String clusterUUID, String customMetadataFileName, String custom) { - requireNonNull(customMetadataFileName); - try { - // Fetch Custom metadata - String[] splitPath = customMetadataFileName.split("/"); - ChecksumBlobStoreFormat customChecksumBlobStoreFormat = new ChecksumBlobStoreFormat<>( - "custom", - METADATA_NAME_FORMAT, - (parser -> Metadata.Custom.fromXContent(parser, custom)) - ); - return customChecksumBlobStoreFormat.read( - globalMetadataContainer(clusterName, clusterUUID), - splitPath[splitPath.length - 1], - blobStoreRepository.getNamedXContentRegistry() - ); - } catch (IOException e) { - throw new IllegalStateException( - String.format(Locale.ROOT, "Error while downloading Custom Metadata - %s", customMetadataFileName), - e - ); + HashMap indexRoutingTables = new HashMap<>(updatedClusterState.getRoutingTable().getIndicesRouting()); + + for (String indexName : diff.getIndicesRoutingDeleted()) { + indexRoutingTables.remove(indexName); } - } - /** - * Fetch latest ClusterMetadataManifest from remote state store - * - * @param clusterUUID uuid of cluster state to refer to in remote - * @param clusterName name of the cluster - * @return ClusterMetadataManifest - */ - public Optional getLatestClusterMetadataManifest(String clusterName, String clusterUUID) { - Optional latestManifestFileName = getLatestManifestFileName(clusterName, clusterUUID); - return latestManifestFileName.map(s -> fetchRemoteClusterMetadataManifest(clusterName, clusterUUID, s)); + return clusterStateBuilder.stateUUID(manifest.getStateUUID()) + .version(manifest.getStateVersion()) + .metadata(metadataBuilder) + .routingTable(new RoutingTable(manifest.getRoutingTableVersion(), indexRoutingTables)) + .build(); } /** @@ -1311,7 +1414,10 @@ public Optional getLatestClusterMetadataManifest(String public String getLastKnownUUIDFromRemote(String clusterName) { try { Set clusterUUIDs = getAllClusterUUIDs(clusterName); - Map latestManifests = 
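
// Aside: getClusterStateUsingDiff reads only the entities the diff manifest marks as updated, then
// prunes the ones it marks as deleted, in that order, on top of the previous state. A toy sketch of
// that overlay-then-prune step over plain maps (hypothetical data, not the manifest types):

import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class ApplyDiffSketch {
    public static void main(String[] args) {
        Map<String, String> previousIndices = new HashMap<>(Map.of("logs", "v1", "metrics", "v1"));

        // Stand-ins for ClusterStateDiffManifest fields.
        Map<String, String> indicesUpdated = Map.of("logs", "v2", "traces", "v1");
        List<String> indicesDeleted = List.of("metrics");

        // Step 1: overlay every updated entity on a copy of the previous state.
        Map<String, String> next = new HashMap<>(previousIndices);
        next.putAll(indicesUpdated);
        // Step 2: drop deletions afterwards, so an update is never lost to a stale removal.
        indicesDeleted.forEach(next::remove);

        System.out.println(next); // {logs=v2, traces=v1}
    }
}
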
getLatestManifestForAllClusterUUIDs(clusterName, clusterUUIDs); + Map latestManifests = remoteManifestManager.getLatestManifestForAllClusterUUIDs( + clusterName, + clusterUUIDs + ); List validChain = createClusterChain(latestManifests, clusterName); if (validChain.isEmpty()) { return ClusterState.UNKNOWN_UUID; @@ -1325,8 +1431,19 @@ public String getLastKnownUUIDFromRemote(String clusterName) { } } + public void setRemoteStateReadTimeout(TimeValue remoteStateReadTimeout) { + this.remoteStateReadTimeout = remoteStateReadTimeout; + } + + private BlobStoreTransferService getBlobStoreTransferService() { + if (blobStoreTransferService == null) { + blobStoreTransferService = new BlobStoreTransferService(getBlobStore(), threadpool); + } + return blobStoreTransferService; + } + Set getAllClusterUUIDs(String clusterName) throws IOException { - Map clusterUUIDMetadata = clusterUUIDContainer(clusterName).children(); + Map clusterUUIDMetadata = clusterUUIDContainer(blobStoreRepository, clusterName).children(); if (clusterUUIDMetadata == null) { return Collections.emptySet(); } @@ -1371,7 +1488,7 @@ private List createClusterChain(final Map 1) { logger.info("Top level cluster UUIDs: {}", topLevelClusterUUIDs); @@ -1427,7 +1544,7 @@ private Map trimClusterUUIDs( if (!ClusterState.UNKNOWN_UUID.equals(currentManifest.getPreviousClusterUUID())) { ClusterMetadataManifest previousManifest = trimmedUUIDs.get(currentManifest.getPreviousClusterUUID()); if (isMetadataEqual(currentManifest, previousManifest, clusterName) - && isGlobalMetadataEqual(currentManifest, previousManifest, clusterName)) { + && remoteGlobalMetadataManager.isGlobalMetadataEqual(currentManifest, previousManifest, clusterName)) { trimmedUUIDs.remove(clusterUUID); } } @@ -1442,14 +1559,20 @@ private boolean isMetadataEqual(ClusterMetadataManifest first, ClusterMetadataMa } final Map secondIndices = second.getIndices() .stream() - .collect(Collectors.toMap(md -> md.getIndexName(), Function.identity())); + .collect(Collectors.toMap(UploadedIndexMetadata::getIndexName, Function.identity())); for (UploadedIndexMetadata uploadedIndexMetadata : first.getIndices()) { - final IndexMetadata firstIndexMetadata = getIndexMetadata(clusterName, first.getClusterUUID(), uploadedIndexMetadata); + final IndexMetadata firstIndexMetadata = remoteIndexMetadataManager.getIndexMetadata( + uploadedIndexMetadata, + first.getClusterUUID() + ); final UploadedIndexMetadata secondUploadedIndexMetadata = secondIndices.get(uploadedIndexMetadata.getIndexName()); if (secondUploadedIndexMetadata == null) { return false; } - final IndexMetadata secondIndexMetadata = getIndexMetadata(clusterName, second.getClusterUUID(), secondUploadedIndexMetadata); + final IndexMetadata secondIndexMetadata = remoteIndexMetadataManager.getIndexMetadata( + secondUploadedIndexMetadata, + second.getClusterUUID() + ); if (firstIndexMetadata.equals(secondIndexMetadata) == false) { return false; } @@ -1457,160 +1580,15 @@ private boolean isMetadataEqual(ClusterMetadataManifest first, ClusterMetadataMa return true; } - private boolean isGlobalMetadataEqual(ClusterMetadataManifest first, ClusterMetadataManifest second, String clusterName) { - Metadata secondGlobalMetadata = getGlobalMetadata(clusterName, second.getClusterUUID(), second); - Metadata firstGlobalMetadata = getGlobalMetadata(clusterName, first.getClusterUUID(), first); - return Metadata.isGlobalResourcesMetadataEquals(firstGlobalMetadata, secondGlobalMetadata); - } - private boolean isValidClusterUUID(ClusterMetadataManifest manifest) { 
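
// Aside: createClusterChain, used above, walks manifests backwards through their previousClusterUUID
// links until it reaches ClusterState.UNKNOWN_UUID, yielding the UUID lineage from the most recent
// cluster backwards. A self-contained sketch of that walk (the UUIDs are made up):

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

public class ClusterChainSketch {
    static final String UNKNOWN_UUID = "_na_"; // sentinel, as in ClusterState.UNKNOWN_UUID

    public static void main(String[] args) {
        // cluster UUID -> previous cluster UUID, a stand-in for the manifest map.
        Map<String, String> previousUuidOf = Map.of("uuid3", "uuid2", "uuid2", "uuid1", "uuid1", UNKNOWN_UUID);

        List<String> chain = new ArrayList<>();
        String current = "uuid3"; // the head: the only UUID no other manifest points at
        while (!UNKNOWN_UUID.equals(current)) {
            chain.add(current);
            current = previousUuidOf.get(current);
        }
        System.out.println(chain); // [uuid3, uuid2, uuid1]
    }
}
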
return manifest.isClusterUUIDCommitted(); } - /** - * Fetch ClusterMetadataManifest files from remote state store in order - * - * @param clusterUUID uuid of cluster state to refer to in remote - * @param clusterName name of the cluster - * @param limit max no of files to fetch - * @return all manifest file names - */ - private List getManifestFileNames(String clusterName, String clusterUUID, int limit) throws IllegalStateException { - try { - - /* - {@link BlobContainer#listBlobsByPrefixInSortedOrder} will list the latest manifest file first - as the manifest file name generated via {@link RemoteClusterStateService#getManifestFileName} ensures - when sorted in LEXICOGRAPHIC order the latest uploaded manifest file comes on top. - */ - return manifestContainer(clusterName, clusterUUID).listBlobsByPrefixInSortedOrder( - MANIFEST_FILE_PREFIX + DELIMITER, - limit, - BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC - ); - } catch (IOException e) { - throw new IllegalStateException("Error while fetching latest manifest file for remote cluster state", e); - } - } - - /** - * Fetch latest ClusterMetadataManifest file from remote state store - * - * @param clusterUUID uuid of cluster state to refer to in remote - * @param clusterName name of the cluster - * @return latest ClusterMetadataManifest filename - */ - private Optional getLatestManifestFileName(String clusterName, String clusterUUID) throws IllegalStateException { - List manifestFilesMetadata = getManifestFileNames(clusterName, clusterUUID, 1); - if (manifestFilesMetadata != null && !manifestFilesMetadata.isEmpty()) { - return Optional.of(manifestFilesMetadata.get(0).name()); - } - logger.info("No manifest file present in remote store for cluster name: {}, cluster UUID: {}", clusterName, clusterUUID); - return Optional.empty(); - } - - /** - * Fetch ClusterMetadataManifest from remote state store - * - * @param clusterUUID uuid of cluster state to refer to in remote - * @param clusterName name of the cluster - * @return ClusterMetadataManifest - */ - ClusterMetadataManifest fetchRemoteClusterMetadataManifest(String clusterName, String clusterUUID, String filename) - throws IllegalStateException { - try { - return getClusterMetadataManifestBlobStoreFormat(filename).read( - manifestContainer(clusterName, clusterUUID), - filename, - blobStoreRepository.getNamedXContentRegistry() - ); - } catch (IOException e) { - throw new IllegalStateException(String.format(Locale.ROOT, "Error while downloading cluster metadata - %s", filename), e); - } - } - - private ChecksumBlobStoreFormat getClusterMetadataManifestBlobStoreFormat(String fileName) { - long codecVersion = getManifestCodecVersion(fileName); - if (codecVersion == MANIFEST_CURRENT_CODEC_VERSION) { - return CLUSTER_METADATA_MANIFEST_FORMAT; - } else if (codecVersion == ClusterMetadataManifest.CODEC_V1) { - return CLUSTER_METADATA_MANIFEST_FORMAT_V1; - } else if (codecVersion == ClusterMetadataManifest.CODEC_V0) { - return CLUSTER_METADATA_MANIFEST_FORMAT_V0; - } - - throw new IllegalArgumentException("Cluster metadata manifest file is corrupted, don't have valid codec version"); - } - - private int getManifestCodecVersion(String fileName) { - String[] splitName = fileName.split(DELIMITER); - if (splitName.length == SPLITED_MANIFEST_FILE_LENGTH) { - return Integer.parseInt(splitName[splitName.length - 1]); // Last value would be codec version. - } else if (splitName.length < SPLITED_MANIFEST_FILE_LENGTH) { // Where codec is not part of file name, i.e. default codec version 0 - // is used. 
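
// Aside: getManifestCodecVersion, removed here, keeps the codec as the final underscore-delimited
// token precisely so that older files lacking the token can default to CODEC_V0; the new
// getFormattedIndexFileName in RemoteClusterStateUtils reads the same trailing token. A runnable
// sketch, assuming a six-token manifest name once the codec is present:

public class ManifestCodecSketch {
    static final String DELIMITER = "__";
    static final int MANIFEST_TOKEN_COUNT = 6; // manifest__term__version__C/P__timestamp__codec
    static final int CODEC_V0 = 0;

    static int codecVersion(String fileName) {
        String[] tokens = fileName.split(DELIMITER);
        if (tokens.length == MANIFEST_TOKEN_COUNT) {
            return Integer.parseInt(tokens[tokens.length - 1]); // codec rides in the last slot
        } else if (tokens.length < MANIFEST_TOKEN_COUNT) {
            return CODEC_V0; // pre-codec files carry no trailing token
        }
        throw new IllegalArgumentException("Manifest file name is corrupted");
    }

    public static void main(String[] args) {
        System.out.println(codecVersion("manifest__123__456__C__789__2")); // 2
        System.out.println(codecVersion("manifest__123__456__C__789"));    // 0
    }
}
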
- return ClusterMetadataManifest.CODEC_V0; - } else { - throw new IllegalArgumentException("Manifest file name is corrupted"); - } - } - - public static String encodeString(String content) { - return Base64.getUrlEncoder().withoutPadding().encodeToString(content.getBytes(StandardCharsets.UTF_8)); - } - public void writeMetadataFailed() { getStats().stateFailed(); } - /** - * Exception for Remote state transfer. - */ - public static class RemoteStateTransferException extends RuntimeException { - - public RemoteStateTransferException(String errorDesc) { - super(errorDesc); - } - - public RemoteStateTransferException(String errorDesc, Throwable cause) { - super(errorDesc, cause); - } - } - public RemotePersistenceStats getStats() { return remoteStateStats; } - - private static class UploadedMetadataResults { - List uploadedIndexMetadata; - Map uploadedCustomMetadataMap; - UploadedMetadataAttribute uploadedCoordinationMetadata; - UploadedMetadataAttribute uploadedSettingsMetadata; - UploadedMetadataAttribute uploadedTemplatesMetadata; - List uploadedIndicesRoutingMetadata; - - public UploadedMetadataResults( - List uploadedIndexMetadata, - Map uploadedCustomMetadataMap, - UploadedMetadataAttribute uploadedCoordinationMetadata, - UploadedMetadataAttribute uploadedSettingsMetadata, - UploadedMetadataAttribute uploadedTemplatesMetadata, - List uploadedIndicesRoutingMetadata - ) { - this.uploadedIndexMetadata = uploadedIndexMetadata; - this.uploadedCustomMetadataMap = uploadedCustomMetadataMap; - this.uploadedCoordinationMetadata = uploadedCoordinationMetadata; - this.uploadedSettingsMetadata = uploadedSettingsMetadata; - this.uploadedTemplatesMetadata = uploadedTemplatesMetadata; - this.uploadedIndicesRoutingMetadata = uploadedIndicesRoutingMetadata; - } - - public UploadedMetadataResults() { - this.uploadedIndexMetadata = new ArrayList<>(); - this.uploadedCustomMetadataMap = new HashMap<>(); - this.uploadedCoordinationMetadata = null; - this.uploadedSettingsMetadata = null; - this.uploadedTemplatesMetadata = null; - this.uploadedIndicesRoutingMetadata = new ArrayList<>(); - } - } } diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateUtils.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateUtils.java index 8efde7ee45f49..f2b93c3784407 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateUtils.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateUtils.java @@ -22,6 +22,8 @@ import java.util.Locale; import java.util.Map; +import static org.opensearch.gateway.remote.ClusterMetadataManifest.CODEC_V1; + /** * Utility class for Remote Cluster State */ @@ -33,6 +35,7 @@ public class RemoteClusterStateUtils { public static final String GLOBAL_METADATA_PATH_TOKEN = "global-metadata"; public static final String CLUSTER_STATE_EPHEMERAL_PATH_TOKEN = "ephemeral"; public static final int GLOBAL_METADATA_CURRENT_CODEC_VERSION = 1; + public static final String METADATA_FILE_PREFIX = "metadata"; public static final String CUSTOM_DELIMITER = "--"; public static final String PATH_DELIMITER = "/"; public static final String METADATA_NAME_PLAIN_FORMAT = "%s"; @@ -43,7 +46,7 @@ public class RemoteClusterStateUtils { Map.of(Metadata.CONTEXT_MODE_PARAM, Metadata.CONTEXT_MODE_GATEWAY) ); - public static BlobPath getCusterMetadataBasePath(BlobStoreRepository blobStoreRepository, String clusterName, String clusterUUID) { + public static BlobPath getClusterMetadataBasePath(BlobStoreRepository blobStoreRepository, 
String clusterName, String clusterUUID) { return blobStoreRepository.basePath().add(encodeString(clusterName)).add(CLUSTER_STATE_PATH_TOKEN).add(clusterUUID); } @@ -51,8 +54,11 @@ public static String encodeString(String content) { return Base64.getUrlEncoder().withoutPadding().encodeToString(content.getBytes(StandardCharsets.UTF_8)); } - public static String getFormattedFileName(String fileName, int codecVersion) { - if (codecVersion < ClusterMetadataManifest.CODEC_V2) { + public static String getFormattedIndexFileName(String fileName) { + String[] pathTokens = fileName.split(DELIMITER); + // last value added is the codec version in IndexMetadata file + int codecVersion = Integer.parseInt(pathTokens[pathTokens.length - 1]); + if (codecVersion == CODEC_V1) { return String.format(Locale.ROOT, METADATA_NAME_FORMAT, fileName); } return fileName; @@ -88,8 +94,8 @@ public UploadedMetadataResults( Map uploadedCustomMetadataMap, ClusterMetadataManifest.UploadedMetadataAttribute uploadedCoordinationMetadata, ClusterMetadataManifest.UploadedMetadataAttribute uploadedSettingsMetadata, - ClusterMetadataManifest.UploadedMetadataAttribute uploadedTransientSettingsMetadata, ClusterMetadataManifest.UploadedMetadataAttribute uploadedTemplatesMetadata, + ClusterMetadataManifest.UploadedMetadataAttribute uploadedTransientSettingsMetadata, ClusterMetadataManifest.UploadedMetadataAttribute uploadedDiscoveryNodes, ClusterMetadataManifest.UploadedMetadataAttribute uploadedClusterBlocks, List uploadedIndicesRoutingMetadata, diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteGlobalMetadataManager.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteGlobalMetadataManager.java new file mode 100644 index 0000000000000..2c5aad99adc0c --- /dev/null +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteGlobalMetadataManager.java @@ -0,0 +1,329 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.gateway.remote; + +import org.opensearch.action.LatchedActionListener; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.DiffableUtils; +import org.opensearch.cluster.DiffableUtils.NonDiffableValueSerializer; +import org.opensearch.cluster.coordination.CoordinationMetadata; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.metadata.Metadata.Custom; +import org.opensearch.cluster.metadata.Metadata.XContentContext; +import org.opensearch.cluster.metadata.TemplatesMetadata; +import org.opensearch.common.CheckedRunnable; +import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.RemoteWritableEntityStore; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.compress.Compressor; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.gateway.remote.model.RemoteClusterStateBlobStore; +import org.opensearch.gateway.remote.model.RemoteCoordinationMetadata; +import org.opensearch.gateway.remote.model.RemoteCustomMetadata; +import org.opensearch.gateway.remote.model.RemoteGlobalMetadata; +import org.opensearch.gateway.remote.model.RemoteHashesOfConsistentSettings; +import org.opensearch.gateway.remote.model.RemotePersistentSettingsMetadata; +import org.opensearch.gateway.remote.model.RemoteReadResult; +import org.opensearch.gateway.remote.model.RemoteTemplatesMetadata; +import org.opensearch.gateway.remote.model.RemoteTransientSettingsMetadata; +import org.opensearch.index.translog.transfer.BlobStoreTransferService; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.threadpool.ThreadPool; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; +import java.util.Map.Entry; +import java.util.stream.Collectors; + +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.METADATA_NAME_FORMAT; + +/** + * A Manager which provides APIs to write and read Global Metadata attributes to remote store + * + * @opensearch.internal + */ +public class RemoteGlobalMetadataManager { + + public static final TimeValue GLOBAL_METADATA_UPLOAD_TIMEOUT_DEFAULT = TimeValue.timeValueMillis(20000); + + public static final Setting GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING = Setting.timeSetting( + "cluster.remote_store.state.global_metadata.upload_timeout", + GLOBAL_METADATA_UPLOAD_TIMEOUT_DEFAULT, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + public static final int GLOBAL_METADATA_CURRENT_CODEC_VERSION = 1; + + private volatile TimeValue globalMetadataUploadTimeout; + private Map remoteWritableEntityStores; + private final Compressor compressor; + private final NamedXContentRegistry namedXContentRegistry; + private final NamedWriteableRegistry namedWriteableRegistry; + + RemoteGlobalMetadataManager( + ClusterSettings clusterSettings, + String clusterName, + BlobStoreRepository blobStoreRepository, + BlobStoreTransferService blobStoreTransferService, + NamedWriteableRegistry namedWriteableRegistry, + ThreadPool threadpool + ) { + this.globalMetadataUploadTimeout = clusterSettings.get(GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING); + 
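
// Aside: the constructor below registers one RemoteClusterStateBlobStore per entity type, and getStore
// (further down) fails fast on anything unregistered. The registry shape, reduced to plain Java with a
// hypothetical EntityStore interface standing in for the blob store:

import java.util.HashMap;
import java.util.Map;

public class EntityStoreRegistrySketch {
    interface EntityStore { void write(String payload); }

    private final Map<String, EntityStore> stores = new HashMap<>();

    void register(String entityType, EntityStore store) {
        stores.put(entityType, store);
    }

    EntityStore getStore(String entityType) {
        EntityStore store = stores.get(entityType);
        if (store == null) {
            // Fail fast, mirroring getStore in RemoteGlobalMetadataManager.
            throw new IllegalArgumentException("Unknown entity type [" + entityType + "]");
        }
        return store;
    }

    public static void main(String[] args) {
        EntityStoreRegistrySketch registry = new EntityStoreRegistrySketch();
        registry.register("coordination", payload -> System.out.println("wrote " + payload));
        registry.getStore("coordination").write("coordination-metadata");
        // registry.getStore("unknown"); // would throw IllegalArgumentException
    }
}
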
+        this.compressor = blobStoreRepository.getCompressor();
+        this.namedXContentRegistry = blobStoreRepository.getNamedXContentRegistry();
+        this.namedWriteableRegistry = namedWriteableRegistry;
+        this.remoteWritableEntityStores = new HashMap<>();
+        this.remoteWritableEntityStores.put(
+            RemoteGlobalMetadata.GLOBAL_METADATA,
+            new RemoteClusterStateBlobStore<>(
+                blobStoreTransferService,
+                blobStoreRepository,
+                clusterName,
+                threadpool,
+                ThreadPool.Names.REMOTE_STATE_READ
+            )
+        );
+        this.remoteWritableEntityStores.put(
+            RemoteCoordinationMetadata.COORDINATION_METADATA,
+            new RemoteClusterStateBlobStore<>(
+                blobStoreTransferService,
+                blobStoreRepository,
+                clusterName,
+                threadpool,
+                ThreadPool.Names.REMOTE_STATE_READ
+            )
+        );
+        this.remoteWritableEntityStores.put(
+            RemotePersistentSettingsMetadata.SETTING_METADATA,
+            new RemoteClusterStateBlobStore<>(
+                blobStoreTransferService,
+                blobStoreRepository,
+                clusterName,
+                threadpool,
+                ThreadPool.Names.REMOTE_STATE_READ
+            )
+        );
+        this.remoteWritableEntityStores.put(
+            RemoteTransientSettingsMetadata.TRANSIENT_SETTING_METADATA,
+            new RemoteClusterStateBlobStore<>(
+                blobStoreTransferService,
+                blobStoreRepository,
+                clusterName,
+                threadpool,
+                ThreadPool.Names.REMOTE_STATE_READ
+            )
+        );
+        this.remoteWritableEntityStores.put(
+            RemoteHashesOfConsistentSettings.HASHES_OF_CONSISTENT_SETTINGS,
+            new RemoteClusterStateBlobStore<>(
+                blobStoreTransferService,
+                blobStoreRepository,
+                clusterName,
+                threadpool,
+                ThreadPool.Names.REMOTE_STATE_READ
+            )
+        );
+        this.remoteWritableEntityStores.put(
+            RemoteTemplatesMetadata.TEMPLATES_METADATA,
+            new RemoteClusterStateBlobStore<>(
+                blobStoreTransferService,
+                blobStoreRepository,
+                clusterName,
+                threadpool,
+                ThreadPool.Names.REMOTE_STATE_READ
+            )
+        );
+        this.remoteWritableEntityStores.put(
+            RemoteCustomMetadata.CUSTOM_METADATA,
+            new RemoteClusterStateBlobStore<>(
+                blobStoreTransferService,
+                blobStoreRepository,
+                clusterName,
+                threadpool,
+                ThreadPool.Names.REMOTE_STATE_READ
+            )
+        );
+        clusterSettings.addSettingsUpdateConsumer(GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING, this::setGlobalMetadataUploadTimeout);
+    }
+
+    /**
+     * Allows async upload of Metadata components to remote
+     */
+    CheckedRunnable<IOException> getAsyncMetadataWriteAction(
+        AbstractRemoteWritableBlobEntity writeEntity,
+        LatchedActionListener<ClusterMetadataManifest.UploadedMetadata> latchedActionListener
+    ) {
+        return (() -> getStore(writeEntity).writeAsync(writeEntity, getActionListener(writeEntity, latchedActionListener)));
+    }
+
+    private RemoteWritableEntityStore getStore(AbstractRemoteWritableBlobEntity entity) {
+        RemoteWritableEntityStore remoteStore = remoteWritableEntityStores.get(entity.getType());
+        if (remoteStore == null) {
+            throw new IllegalArgumentException("Unknown entity type [" + entity.getType() + "]");
+        }
+        return remoteStore;
+    }
+
+    private ActionListener<Void> getActionListener(
+        AbstractRemoteWritableBlobEntity remoteBlobStoreObject,
+        LatchedActionListener<ClusterMetadataManifest.UploadedMetadata> latchedActionListener
+    ) {
+        return ActionListener.wrap(
+            resp -> latchedActionListener.onResponse(remoteBlobStoreObject.getUploadedMetadata()),
+            ex -> latchedActionListener.onFailure(
+                new RemoteStateTransferException("Upload failed for " + remoteBlobStoreObject.getType(), ex)
+            )
+        );
+    }
+
+    CheckedRunnable<IOException> getAsyncMetadataReadAction(
+        AbstractRemoteWritableBlobEntity readEntity,
+        String componentName,
+        LatchedActionListener<RemoteReadResult> listener
+    ) {
+        ActionListener actionListener = ActionListener.wrap(
+            response -> listener.onResponse(new RemoteReadResult(response, readEntity.getType(), componentName)),
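+            // success wraps the raw entity in a RemoteReadResult tagged with its component type
+            // and name; any failure is handed to the caller's latched listener unchanged: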
+            listener::onFailure
+        );
+        return () -> getStore(readEntity).readAsync(readEntity, actionListener);
+    }
+
+    Metadata getGlobalMetadata(String clusterUUID, ClusterMetadataManifest clusterMetadataManifest) {
+        String globalMetadataFileName = clusterMetadataManifest.getGlobalMetadataFileName();
+        try {
+            // Fetch Global metadata
+            if (globalMetadataFileName != null) {
+                RemoteGlobalMetadata remoteGlobalMetadata = new RemoteGlobalMetadata(
+                    String.format(Locale.ROOT, METADATA_NAME_FORMAT, globalMetadataFileName),
+                    clusterUUID,
+                    compressor,
+                    namedXContentRegistry
+                );
+                return (Metadata) getStore(remoteGlobalMetadata).read(remoteGlobalMetadata);
+            } else if (clusterMetadataManifest.hasMetadataAttributesFiles()) {
+                // from CODEC_V2 onwards, all metadata is uploaded as granular files instead of a single global-metadata entity
+                Metadata.Builder builder = new Metadata.Builder();
+                if (clusterMetadataManifest.getCoordinationMetadata().getUploadedFilename() != null) {
+                    RemoteCoordinationMetadata remoteCoordinationMetadata = new RemoteCoordinationMetadata(
+                        clusterMetadataManifest.getCoordinationMetadata().getUploadedFilename(),
+                        clusterUUID,
+                        compressor,
+                        namedXContentRegistry
+                    );
+                    builder.coordinationMetadata(
+                        (CoordinationMetadata) getStore(remoteCoordinationMetadata).read(remoteCoordinationMetadata)
+                    );
+                }
+                if (clusterMetadataManifest.getTemplatesMetadata().getUploadedFilename() != null) {
+                    RemoteTemplatesMetadata remoteTemplatesMetadata = new RemoteTemplatesMetadata(
+                        clusterMetadataManifest.getTemplatesMetadata().getUploadedFilename(),
+                        clusterUUID,
+                        compressor,
+                        namedXContentRegistry
+                    );
+                    builder.templates((TemplatesMetadata) getStore(remoteTemplatesMetadata).read(remoteTemplatesMetadata));
+                }
+                if (clusterMetadataManifest.getSettingsMetadata().getUploadedFilename() != null) {
+                    RemotePersistentSettingsMetadata remotePersistentSettingsMetadata = new RemotePersistentSettingsMetadata(
+                        clusterMetadataManifest.getSettingsMetadata().getUploadedFilename(),
+                        clusterUUID,
+                        compressor,
+                        namedXContentRegistry
+                    );
+                    builder.persistentSettings(
+                        (Settings) getStore(remotePersistentSettingsMetadata).read(remotePersistentSettingsMetadata)
+                    );
+                }
+                builder.clusterUUID(clusterMetadataManifest.getClusterUUID());
+                builder.clusterUUIDCommitted(clusterMetadataManifest.isClusterUUIDCommitted());
+                clusterMetadataManifest.getCustomMetadataMap().forEach((key, value) -> {
+                    try {
+                        RemoteCustomMetadata remoteCustomMetadata = new RemoteCustomMetadata(
+                            value.getUploadedFilename(),
+                            key,
+                            clusterUUID,
+                            compressor,
+                            namedWriteableRegistry
+                        );
+                        builder.putCustom(key, (Custom) getStore(remoteCustomMetadata).read(remoteCustomMetadata));
+                    } catch (IOException e) {
+                        throw new IllegalStateException(
+                            String.format(Locale.ROOT, "Error while downloading Custom Metadata - %s", value.getUploadedFilename()),
+                            e
+                        );
+                    }
+                });
+                return builder.build();
+            } else {
+                return Metadata.EMPTY_METADATA;
+            }
+        } catch (IOException e) {
+            throw new IllegalStateException(
+                String.format(Locale.ROOT, "Error while downloading Global Metadata - %s", globalMetadataFileName),
+                e
+            );
+        }
+    }
+
+    DiffableUtils.MapDiff<String, Metadata.Custom, Map<String, Metadata.Custom>> getCustomsDiff(
+        ClusterState currentState,
+        ClusterState previousState,
+        boolean firstUploadForSplitGlobalMetadata,
+        boolean isRemotePublicationEnabled
+    ) {
+        if (firstUploadForSplitGlobalMetadata) {
+            // for the first split-global-metadata upload, all customs are uploaded
+            return DiffableUtils.diff(
+                Collections.emptyMap(),
+                filterCustoms(currentState.metadata().customs(), isRemotePublicationEnabled),
+                DiffableUtils.getStringKeySerializer(),
+                NonDiffableValueSerializer.getAbstractInstance()
+            );
+        }
+        return DiffableUtils.diff(
+            filterCustoms(previousState.metadata().customs(), isRemotePublicationEnabled),
+            filterCustoms(currentState.metadata().customs(), isRemotePublicationEnabled),
+            DiffableUtils.getStringKeySerializer(),
+            NonDiffableValueSerializer.getAbstractInstance()
+        );
+    }
+
+    public static Map<String, Metadata.Custom> filterCustoms(Map<String, Metadata.Custom> customs, boolean isRemotePublicationEnabled) {
+        if (isRemotePublicationEnabled) {
+            return customs;
+        }
+        return customs.entrySet()
+            .stream()
+            .filter(e -> e.getValue().context().contains(XContentContext.GATEWAY))
+            .collect(Collectors.toMap(Entry::getKey, Entry::getValue));
+    }
+
+    boolean isGlobalMetadataEqual(ClusterMetadataManifest first, ClusterMetadataManifest second, String clusterName) {
+        Metadata secondGlobalMetadata = getGlobalMetadata(second.getClusterUUID(), second);
+        Metadata firstGlobalMetadata = getGlobalMetadata(first.getClusterUUID(), first);
+        return Metadata.isGlobalResourcesMetadataEquals(firstGlobalMetadata, secondGlobalMetadata);
+    }
+
+    private void setGlobalMetadataUploadTimeout(TimeValue newGlobalMetadataUploadTimeout) {
+        this.globalMetadataUploadTimeout = newGlobalMetadataUploadTimeout;
+    }
+
+    public TimeValue getGlobalMetadataUploadTimeout() {
+        return this.globalMetadataUploadTimeout;
+    }
+}
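The filterCustoms gate above decides which metadata customs leave the node. A minimal usage sketch, assuming a customs map is already in hand (variable names here are illustrative, not part of the change): with remote publication disabled, only customs whose context() includes XContentContext.GATEWAY are kept for upload, while enabling remote publication passes the full map through.

    // Sketch only: clusterState stands in for whatever ClusterState is being uploaded.
    Map<String, Metadata.Custom> allCustoms = clusterState.metadata().customs();
    // Persisted-state path: API-only customs are dropped, GATEWAY-context customs survive.
    Map<String, Metadata.Custom> persisted = RemoteGlobalMetadataManager.filterCustoms(allCustoms, false);
    // Remote-publication path: no filtering, every custom is uploaded.
    Map<String, Metadata.Custom> published = RemoteGlobalMetadataManager.filterCustoms(allCustoms, true);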
diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteIndexMetadataManager.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteIndexMetadataManager.java
new file mode 100644
index 0000000000000..a84161b202a22
--- /dev/null
+++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteIndexMetadataManager.java
@@ -0,0 +1,165 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.gateway.remote;
+
+import org.opensearch.action.LatchedActionListener;
+import org.opensearch.cluster.metadata.IndexMetadata;
+import org.opensearch.common.CheckedRunnable;
+import org.opensearch.common.remote.RemoteWritableEntityStore;
+import org.opensearch.common.settings.ClusterSettings;
+import org.opensearch.common.settings.Setting;
+import org.opensearch.common.unit.TimeValue;
+import org.opensearch.core.action.ActionListener;
+import org.opensearch.core.compress.Compressor;
+import org.opensearch.core.xcontent.NamedXContentRegistry;
+import org.opensearch.gateway.remote.model.RemoteClusterStateBlobStore;
+import org.opensearch.gateway.remote.model.RemoteIndexMetadata;
+import org.opensearch.gateway.remote.model.RemoteReadResult;
+import org.opensearch.index.translog.transfer.BlobStoreTransferService;
+import org.opensearch.repositories.blobstore.BlobStoreRepository;
+import org.opensearch.threadpool.ThreadPool;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Objects;
+
+/**
+ * A Manager which provides APIs to write and read Index Metadata to and from remote store
+ *
+ * @opensearch.internal
+ */
+public class RemoteIndexMetadataManager {
+
+    public static final TimeValue INDEX_METADATA_UPLOAD_TIMEOUT_DEFAULT = TimeValue.timeValueMillis(20000);
+
+    public static final Setting<TimeValue> INDEX_METADATA_UPLOAD_TIMEOUT_SETTING = Setting.timeSetting(
+        "cluster.remote_store.state.index_metadata.upload_timeout",
+        INDEX_METADATA_UPLOAD_TIMEOUT_DEFAULT,
+        Setting.Property.Dynamic,
+        Setting.Property.NodeScope,
+        Setting.Property.Deprecated
+    );
+
+    private final RemoteWritableEntityStore<IndexMetadata, RemoteIndexMetadata> indexMetadataBlobStore;
+    private final Compressor compressor;
+    private final NamedXContentRegistry namedXContentRegistry;
+
+    private volatile TimeValue indexMetadataUploadTimeout;
+
+    public RemoteIndexMetadataManager(
+        ClusterSettings clusterSettings,
+        String clusterName,
+        BlobStoreRepository blobStoreRepository,
+        BlobStoreTransferService blobStoreTransferService,
+        ThreadPool threadpool
+    ) {
+        this.indexMetadataBlobStore = new RemoteClusterStateBlobStore<>(
+            blobStoreTransferService,
+            blobStoreRepository,
+            clusterName,
+            threadpool,
+            ThreadPool.Names.REMOTE_STATE_READ
+        );
+        this.namedXContentRegistry = blobStoreRepository.getNamedXContentRegistry();
+        this.compressor = blobStoreRepository.getCompressor();
+        this.indexMetadataUploadTimeout = clusterSettings.get(INDEX_METADATA_UPLOAD_TIMEOUT_SETTING);
+        clusterSettings.addSettingsUpdateConsumer(INDEX_METADATA_UPLOAD_TIMEOUT_SETTING, this::setIndexMetadataUploadTimeout);
+    }
+
+    /**
+     * Allows async upload of IndexMetadata to remote store
+     *
+     * @param indexMetadata {@link IndexMetadata} to upload
+     * @param latchedActionListener listener to respond back on after upload finishes
+     */
+    CheckedRunnable<IOException> getAsyncIndexMetadataWriteAction(
+        IndexMetadata indexMetadata,
+        String clusterUUID,
+        LatchedActionListener<ClusterMetadataManifest.UploadedMetadata> latchedActionListener
+    ) {
+        RemoteIndexMetadata remoteIndexMetadata = new RemoteIndexMetadata(indexMetadata, clusterUUID, compressor, namedXContentRegistry);
+        ActionListener<Void> completionListener = ActionListener.wrap(
+            resp -> latchedActionListener.onResponse(remoteIndexMetadata.getUploadedMetadata()),
+            ex -> latchedActionListener.onFailure(new RemoteStateTransferException(indexMetadata.getIndex().getName(), ex))
+        );
+        return () -> indexMetadataBlobStore.writeAsync(remoteIndexMetadata, completionListener);
+    }
+
+    CheckedRunnable<IOException> getAsyncIndexMetadataReadAction(
+        String clusterUUID,
+        String uploadedFilename,
+        LatchedActionListener<RemoteReadResult> latchedActionListener
+    ) {
+        RemoteIndexMetadata remoteIndexMetadata = new RemoteIndexMetadata(
+            RemoteClusterStateUtils.getFormattedIndexFileName(uploadedFilename),
+            clusterUUID,
+            compressor,
+            namedXContentRegistry
+        );
+        ActionListener<IndexMetadata> actionListener = ActionListener.wrap(
+            response -> latchedActionListener.onResponse(
+                new RemoteReadResult(response, RemoteIndexMetadata.INDEX, response.getIndex().getName())
+            ),
+            latchedActionListener::onFailure
+        );
+        return () -> indexMetadataBlobStore.readAsync(remoteIndexMetadata, actionListener);
+    }
+
+    /**
+     * Fetch index metadata from remote cluster state
+     *
+     * @param uploadedIndexMetadata {@link ClusterMetadataManifest.UploadedIndexMetadata} contains details about remote location of index metadata
+     * @param clusterUUID uuid of cluster state to refer to in remote
+     * @return {@link IndexMetadata}
+     */
+    IndexMetadata getIndexMetadata(ClusterMetadataManifest.UploadedIndexMetadata uploadedIndexMetadata, String clusterUUID) {
+        RemoteIndexMetadata remoteIndexMetadata = new RemoteIndexMetadata(
+            RemoteClusterStateUtils.getFormattedIndexFileName(uploadedIndexMetadata.getUploadedFilename()),
+            clusterUUID,
+            compressor,
+            namedXContentRegistry
+        );
+        try {
+            return indexMetadataBlobStore.read(remoteIndexMetadata);
+        } catch (IOException e) {
+            throw new IllegalStateException(
+                String.format(Locale.ROOT, "Error while downloading IndexMetadata - %s", uploadedIndexMetadata.getUploadedFilename()),
+                e
+            );
+        }
+    }
+
+    /**
+     * Fetch latest index metadata from remote cluster state
+     *
+     * @param clusterUUID uuid of cluster state to refer to in remote
+     * @param clusterMetadataManifest manifest file of cluster
+     * @return {@code Map<String, IndexMetadata>} latest IndexUUID to IndexMetadata map
+     */
+    Map<String, IndexMetadata> getIndexMetadataMap(String clusterUUID, ClusterMetadataManifest clusterMetadataManifest) {
+        assert Objects.equals(clusterUUID, clusterMetadataManifest.getClusterUUID())
+            : "Corrupt ClusterMetadataManifest found. Cluster UUID mismatch.";
+        Map<String, IndexMetadata> remoteIndexMetadata = new HashMap<>();
+        for (ClusterMetadataManifest.UploadedIndexMetadata uploadedIndexMetadata : clusterMetadataManifest.getIndices()) {
+            IndexMetadata indexMetadata = getIndexMetadata(uploadedIndexMetadata, clusterUUID);
+            remoteIndexMetadata.put(uploadedIndexMetadata.getIndexUUID(), indexMetadata);
+        }
+        return remoteIndexMetadata;
+    }
+
+    public TimeValue getIndexMetadataUploadTimeout() {
+        return this.indexMetadataUploadTimeout;
+    }
+
+    private void setIndexMetadataUploadTimeout(TimeValue newIndexMetadataUploadTimeout) {
+        this.indexMetadataUploadTimeout = newIndexMetadataUploadTimeout;
+    }
+
+}
diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteManifestManager.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteManifestManager.java
new file mode 100644
index 0000000000000..cb09de1a6ec44
--- /dev/null
+++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteManifestManager.java
@@ -0,0 +1,319 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.gateway.remote;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opensearch.Version;
+import org.opensearch.action.LatchedActionListener;
+import org.opensearch.cluster.ClusterState;
+import org.opensearch.common.blobstore.BlobContainer;
+import org.opensearch.common.blobstore.BlobMetadata;
+import org.opensearch.common.blobstore.BlobPath;
+import org.opensearch.common.settings.ClusterSettings;
+import org.opensearch.common.settings.Setting;
+import org.opensearch.common.unit.TimeValue;
+import org.opensearch.core.action.ActionListener;
+import org.opensearch.core.compress.Compressor;
+import org.opensearch.core.xcontent.NamedXContentRegistry;
+import org.opensearch.gateway.remote.model.RemoteClusterMetadataManifest;
+import org.opensearch.gateway.remote.model.RemoteClusterStateBlobStore;
+import org.opensearch.gateway.remote.model.RemoteClusterStateManifestInfo;
+import org.opensearch.index.remote.RemoteStoreUtils;
+import org.opensearch.index.translog.transfer.BlobStoreTransferService;
+import org.opensearch.repositories.blobstore.BlobStoreRepository;
+import org.opensearch.threadpool.ThreadPool;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.opensearch.gateway.remote.RemoteClusterStateUtils.DELIMITER;
+
+/**
+ * A Manager which provides APIs to write and read {@link ClusterMetadataManifest} to and from remote store
+ *
+ * @opensearch.internal
+ */
+public class RemoteManifestManager {
+
+    public static final TimeValue METADATA_MANIFEST_UPLOAD_TIMEOUT_DEFAULT = TimeValue.timeValueMillis(20000);
+
+    public static final Setting<TimeValue> METADATA_MANIFEST_UPLOAD_TIMEOUT_SETTING = Setting.timeSetting(
+        "cluster.remote_store.state.metadata_manifest.upload_timeout",
+        METADATA_MANIFEST_UPLOAD_TIMEOUT_DEFAULT,
+        Setting.Property.Dynamic,
+        Setting.Property.NodeScope
+    );
+    private static final Logger logger = LogManager.getLogger(RemoteManifestManager.class);
+
+    private volatile TimeValue metadataManifestUploadTimeout;
+    private final String nodeId;
+    private final RemoteClusterStateBlobStore<ClusterMetadataManifest, RemoteClusterMetadataManifest> manifestBlobStore;
+    private final Compressor compressor;
+    private final NamedXContentRegistry namedXContentRegistry;
+    // TODO: remove blobStoreRepository from here (only needed to resolve manifest container paths)
+    private final BlobStoreRepository blobStoreRepository;
+
+    RemoteManifestManager(
+        ClusterSettings clusterSettings,
+        String clusterName,
+        String nodeId,
+        BlobStoreRepository blobStoreRepository,
+        BlobStoreTransferService blobStoreTransferService,
+        ThreadPool threadpool
+    ) {
+        this.metadataManifestUploadTimeout = clusterSettings.get(METADATA_MANIFEST_UPLOAD_TIMEOUT_SETTING);
+        this.nodeId = nodeId;
+        this.manifestBlobStore = new RemoteClusterStateBlobStore<>(
+            blobStoreTransferService,
+            blobStoreRepository,
+            clusterName,
+            threadpool,
+            ThreadPool.Names.REMOTE_STATE_READ
+        );
+        clusterSettings.addSettingsUpdateConsumer(METADATA_MANIFEST_UPLOAD_TIMEOUT_SETTING, this::setMetadataManifestUploadTimeout);
+        this.compressor = blobStoreRepository.getCompressor();
+        this.namedXContentRegistry = blobStoreRepository.getNamedXContentRegistry();
+        this.blobStoreRepository = blobStoreRepository;
+    }
+
+    RemoteClusterStateManifestInfo uploadManifest(
+        ClusterState clusterState,
+        RemoteClusterStateUtils.UploadedMetadataResults uploadedMetadataResult,
+        String previousClusterUUID,
+        ClusterStateDiffManifest clusterDiffManifest,
+        boolean committed
+    ) {
+        synchronized (this) {
+            ClusterMetadataManifest.Builder manifestBuilder = ClusterMetadataManifest.builder();
+            manifestBuilder.clusterTerm(clusterState.term())
+                .stateVersion(clusterState.getVersion())
+                .clusterUUID(clusterState.metadata().clusterUUID())
+                .stateUUID(clusterState.stateUUID())
+                .opensearchVersion(Version.CURRENT)
+                .nodeId(nodeId)
+                .committed(committed)
+                .codecVersion(RemoteClusterMetadataManifest.MANIFEST_CURRENT_CODEC_VERSION)
+                .indices(uploadedMetadataResult.uploadedIndexMetadata)
+                .previousClusterUUID(previousClusterUUID)
+                .clusterUUIDCommitted(clusterState.metadata().clusterUUIDCommitted())
+                .coordinationMetadata(uploadedMetadataResult.uploadedCoordinationMetadata)
+                .settingMetadata(uploadedMetadataResult.uploadedSettingsMetadata)
+                .templatesMetadata(uploadedMetadataResult.uploadedTemplatesMetadata)
+                .customMetadataMap(uploadedMetadataResult.uploadedCustomMetadataMap)
+                .routingTableVersion(clusterState.getRoutingTable().version())
+                .indicesRouting(uploadedMetadataResult.uploadedIndicesRoutingMetadata)
+                .discoveryNodesMetadata(uploadedMetadataResult.uploadedDiscoveryNodes)
+                .clusterBlocksMetadata(uploadedMetadataResult.uploadedClusterBlocks)
+                .diffManifest(clusterDiffManifest)
+                .metadataVersion(clusterState.metadata().version())
+                .transientSettingsMetadata(uploadedMetadataResult.uploadedTransientSettingsMetadata)
+                .clusterStateCustomMetadataMap(uploadedMetadataResult.uploadedClusterStateCustomMetadataMap)
+                .hashesOfConsistentSettings(uploadedMetadataResult.uploadedHashesOfConsistentSettings);
+            final ClusterMetadataManifest manifest = manifestBuilder.build();
+            String manifestFileName = writeMetadataManifest(clusterState.metadata().clusterUUID(), manifest);
+            return new RemoteClusterStateManifestInfo(manifest, manifestFileName);
+        }
+    }
+
+    private String writeMetadataManifest(String clusterUUID, ClusterMetadataManifest uploadManifest) {
+        AtomicReference<String> result = new AtomicReference<>();
+        AtomicReference<Exception> exceptionReference = new AtomicReference<>();
+
+        // latch to wait for the upload to finish
+        CountDownLatch latch = new CountDownLatch(1);
+
+        LatchedActionListener<Void> completionListener = new LatchedActionListener<>(ActionListener.wrap(resp -> {
+            logger.trace("Manifest file uploaded successfully.");
+        }, ex -> { exceptionReference.set(ex); }), latch);
+
+        RemoteClusterMetadataManifest remoteClusterMetadataManifest = new RemoteClusterMetadataManifest(
+            uploadManifest,
+            clusterUUID,
+            compressor,
+            namedXContentRegistry
+        );
+        manifestBlobStore.writeAsync(remoteClusterMetadataManifest, completionListener);
+
+        try {
+            if (latch.await(getMetadataManifestUploadTimeout().millis(), TimeUnit.MILLISECONDS) == false) {
+                RemoteStateTransferException ex = new RemoteStateTransferException(
+                    "Timed out waiting for transfer of manifest file to complete"
+                );
+                throw ex;
+            }
+        } catch (InterruptedException ex) {
+            RemoteStateTransferException exception = new RemoteStateTransferException(
+                String.format(Locale.ROOT, "Interrupted while waiting for transfer of manifest file %s to complete", remoteClusterMetadataManifest.getBlobFileName()),
+                ex
+            );
+            Thread.currentThread().interrupt();
+            throw exception;
+        }
+        if (exceptionReference.get() != null) {
+            throw new RemoteStateTransferException(exceptionReference.get().getMessage(), exceptionReference.get());
+        }
+        logger.debug(
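+            // one debug line per successful manifest write; the phase reflects whether this
+            // manifest commits the cluster state or merely publishes it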
"Metadata manifest file [{}] written during [{}] phase. ", + remoteClusterMetadataManifest.getBlobFileName(), + uploadManifest.isCommitted() ? "commit" : "publish" + ); + return remoteClusterMetadataManifest.getUploadedMetadata().getUploadedFilename(); + } + + /** + * Fetch latest ClusterMetadataManifest from remote state store + * + * @param clusterUUID uuid of cluster state to refer to in remote + * @param clusterName name of the cluster + * @return ClusterMetadataManifest + */ + public Optional getLatestClusterMetadataManifest(String clusterName, String clusterUUID) { + Optional latestManifestFileName = getLatestManifestFileName(clusterName, clusterUUID); + return latestManifestFileName.map(s -> fetchRemoteClusterMetadataManifest(clusterName, clusterUUID, s)); + } + + public ClusterMetadataManifest getRemoteClusterMetadataManifestByFileName(String clusterUUID, String filename) + throws IllegalStateException { + try { + RemoteClusterMetadataManifest remoteClusterMetadataManifest = new RemoteClusterMetadataManifest( + filename, + clusterUUID, + compressor, + namedXContentRegistry + ); + return manifestBlobStore.read(remoteClusterMetadataManifest); + } catch (IOException e) { + throw new IllegalStateException(String.format(Locale.ROOT, "Error while downloading cluster metadata - %s", filename), e); + } + } + + /** + * Fetch ClusterMetadataManifest from remote state store + * + * @param clusterUUID uuid of cluster state to refer to in remote + * @param clusterName name of the cluster + * @return ClusterMetadataManifest + */ + ClusterMetadataManifest fetchRemoteClusterMetadataManifest(String clusterName, String clusterUUID, String filename) + throws IllegalStateException { + try { + String fullBlobName = getManifestFolderPath(clusterName, clusterUUID).buildAsString() + filename; + RemoteClusterMetadataManifest remoteClusterMetadataManifest = new RemoteClusterMetadataManifest( + fullBlobName, + clusterUUID, + compressor, + namedXContentRegistry + ); + return manifestBlobStore.read(remoteClusterMetadataManifest); + } catch (IOException e) { + throw new IllegalStateException(String.format(Locale.ROOT, "Error while downloading cluster metadata - %s", filename), e); + } + } + + Map getLatestManifestForAllClusterUUIDs(String clusterName, Set clusterUUIDs) { + Map manifestsByClusterUUID = new HashMap<>(); + for (String clusterUUID : clusterUUIDs) { + try { + Optional manifest = getLatestClusterMetadataManifest(clusterName, clusterUUID); + manifest.ifPresent(clusterMetadataManifest -> manifestsByClusterUUID.put(clusterUUID, clusterMetadataManifest)); + } catch (Exception e) { + throw new IllegalStateException( + String.format(Locale.ROOT, "Exception in fetching manifest for clusterUUID: %s", clusterUUID), + e + ); + } + } + return manifestsByClusterUUID; + } + + private BlobContainer manifestContainer(String clusterName, String clusterUUID) { + // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/manifest + return blobStoreRepository.blobStore().blobContainer(getManifestFolderPath(clusterName, clusterUUID)); + } + + BlobPath getManifestFolderPath(String clusterName, String clusterUUID) { + return RemoteClusterStateUtils.getClusterMetadataBasePath(blobStoreRepository, clusterName, clusterUUID) + .add(RemoteClusterMetadataManifest.MANIFEST); + } + + public TimeValue getMetadataManifestUploadTimeout() { + return this.metadataManifestUploadTimeout; + } + + private void setMetadataManifestUploadTimeout(TimeValue newMetadataManifestUploadTimeout) { + this.metadataManifestUploadTimeout = 
newMetadataManifestUploadTimeout;
+    }
+
+    /**
+     * Fetch ClusterMetadataManifest file names from remote state store, latest first
+     *
+     * @param clusterName name of the cluster
+     * @param clusterUUID uuid of cluster state to refer to in remote
+     * @param filePrefix prefix to filter manifest file names by
+     * @param limit max number of files to fetch
+     * @return up to {@code limit} manifest files, latest first
+     */
+    private List<BlobMetadata> getManifestFileNames(String clusterName, String clusterUUID, String filePrefix, int limit)
+        throws IllegalStateException {
+        try {
+
+            /*
+              {@link BlobContainer#listBlobsByPrefixInSortedOrder} will list the latest manifest file first
+              as the manifest file name generated via {@link RemoteClusterStateService#getManifestFileName} ensures
+              when sorted in LEXICOGRAPHIC order the latest uploaded manifest file comes on top.
+             */
+            return manifestContainer(clusterName, clusterUUID).listBlobsByPrefixInSortedOrder(
+                filePrefix,
+                limit,
+                BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC
+            );
+        } catch (IOException e) {
+            throw new IllegalStateException("Error while fetching latest manifest file for remote cluster state", e);
+        }
+    }
+
+    static String getManifestFilePrefixForTermVersion(long term, long version) {
+        return String.join(
+            DELIMITER,
+            RemoteClusterMetadataManifest.MANIFEST,
+            RemoteStoreUtils.invertLong(term),
+            RemoteStoreUtils.invertLong(version)
+        ) + DELIMITER;
+    }
+
+    /**
+     * Fetch latest ClusterMetadataManifest file from remote state store
+     *
+     * @param clusterName name of the cluster
+     * @param clusterUUID uuid of cluster state to refer to in remote
+     * @return latest ClusterMetadataManifest filename
+     */
+    private Optional<String> getLatestManifestFileName(String clusterName, String clusterUUID) throws IllegalStateException {
+        List<BlobMetadata> manifestFilesMetadata = getManifestFileNames(
+            clusterName,
+            clusterUUID,
+            RemoteClusterMetadataManifest.MANIFEST + DELIMITER,
+            1
+        );
+        if (manifestFilesMetadata != null && !manifestFilesMetadata.isEmpty()) {
+            return Optional.of(manifestFilesMetadata.get(0).name());
+        }
+        logger.info("No manifest file present in remote store for cluster name: {}, cluster UUID: {}", clusterName, clusterUUID);
+        return Optional.empty();
+    }
+}
diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemotePersistenceStats.java b/server/src/main/java/org/opensearch/gateway/remote/RemotePersistenceStats.java
index f2330846fa23e..36d107a99d258 100644
--- a/server/src/main/java/org/opensearch/gateway/remote/RemotePersistenceStats.java
+++ b/server/src/main/java/org/opensearch/gateway/remote/RemotePersistenceStats.java
@@ -19,12 +19,16 @@
  */
 public class RemotePersistenceStats extends PersistedStateStats {
     static final String CLEANUP_ATTEMPT_FAILED_COUNT = "cleanup_attempt_failed_count";
+    static final String INDEX_ROUTING_FILES_CLEANUP_ATTEMPT_FAILED_COUNT = "index_routing_files_cleanup_attempt_failed_count";
     static final String REMOTE_UPLOAD = "remote_upload";
     private AtomicLong cleanupAttemptFailedCount = new AtomicLong(0);
+    private AtomicLong indexRoutingFilesCleanupAttemptFailedCount = new AtomicLong(0);
+
     public RemotePersistenceStats() {
         super(REMOTE_UPLOAD);
         addToExtendedFields(CLEANUP_ATTEMPT_FAILED_COUNT, cleanupAttemptFailedCount);
+        addToExtendedFields(INDEX_ROUTING_FILES_CLEANUP_ATTEMPT_FAILED_COUNT, indexRoutingFilesCleanupAttemptFailedCount);
     }
 
     public void cleanUpAttemptFailed() {
@@ -34,4 +38,12 @@ public void cleanUpAttemptFailed() {
     public long getCleanupAttemptFailedCount() {
         return cleanupAttemptFailedCount.get();
     }
+
+    public void indexRoutingFilesCleanupAttemptFailed() {
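+        // monotonically increasing failure counter, surfaced with the other persisted-state stats
+        // under the "index_routing_files_cleanup_attempt_failed_count" extended field registered above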
+        indexRoutingFilesCleanupAttemptFailedCount.incrementAndGet();
+    }
+
+    public long getIndexRoutingFilesCleanupAttemptFailedCount() {
+        return indexRoutingFilesCleanupAttemptFailedCount.get();
+    }
 }
diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterBlocks.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterBlocks.java
index 937f9dc2c8631..9c5fbd5941640 100644
--- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterBlocks.java
+++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterBlocks.java
@@ -10,21 +10,18 @@
 
 import org.opensearch.cluster.block.ClusterBlocks;
 import org.opensearch.common.io.Streams;
-import org.opensearch.common.io.stream.BytesStreamOutput;
 import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity;
 import org.opensearch.common.remote.BlobPathParameters;
-import org.opensearch.core.common.io.stream.BytesStreamInput;
-import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.compress.Compressor;
 import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedMetadata;
 import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedMetadataAttribute;
 import org.opensearch.index.remote.RemoteStoreUtils;
+import org.opensearch.repositories.blobstore.ChecksumWritableBlobStoreFormat;
 
 import java.io.IOException;
 import java.io.InputStream;
 import java.util.List;
 
-import static org.opensearch.core.common.bytes.BytesReference.toBytes;
 import static org.opensearch.gateway.remote.RemoteClusterStateAttributesManager.CLUSTER_STATE_ATTRIBUTES_CURRENT_CODEC_VERSION;
 import static org.opensearch.gateway.remote.RemoteClusterStateUtils.CLUSTER_STATE_EPHEMERAL_PATH_TOKEN;
 import static org.opensearch.gateway.remote.RemoteClusterStateUtils.DELIMITER;
@@ -35,6 +32,10 @@ public class RemoteClusterBlocks extends AbstractRemoteWritableBlobEntity<ClusterBlocks> {
 
     public static final String CLUSTER_BLOCKS = "blocks";
+    public static final ChecksumWritableBlobStoreFormat<ClusterBlocks> CLUSTER_BLOCKS_FORMAT = new ChecksumWritableBlobStoreFormat<>(
+        "blocks",
+        ClusterBlocks::readFrom
+    );
 
     private ClusterBlocks clusterBlocks;
     private long stateVersion;
@@ -55,6 +56,11 @@ public BlobPathParameters getBlobPathParameters() {
         return new BlobPathParameters(List.of(CLUSTER_STATE_EPHEMERAL_PATH_TOKEN), CLUSTER_BLOCKS);
     }
 
+    @Override
+    public String getType() {
+        return CLUSTER_BLOCKS;
+    }
+
     @Override
     public String generateBlobFileName() {
         // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/transient/______
@@ -77,20 +83,11 @@ public UploadedMetadata getUploadedMetadata() {
 
     @Override
     public InputStream serialize() throws IOException {
-        try (BytesStreamOutput bytesStreamOutput = new BytesStreamOutput()) {
-            clusterBlocks.writeTo(bytesStreamOutput);
-            return bytesStreamOutput.bytes().streamInput();
-        } catch (IOException e) {
-            throw new IOException("Failed to serialize remote cluster blocks", e);
-        }
+        return CLUSTER_BLOCKS_FORMAT.serialize(clusterBlocks, generateBlobFileName(), getCompressor()).streamInput();
     }
 
     @Override
     public ClusterBlocks deserialize(final InputStream inputStream) throws IOException {
-        try (StreamInput in = new BytesStreamInput(toBytes(Streams.readFully(inputStream)))) {
-            return ClusterBlocks.readFrom(in);
-        } catch (IOException e) {
-            throw new IOException("Failed to deserialize remote cluster blocks", e);
-        }
+        return CLUSTER_BLOCKS_FORMAT.deserialize(blobName, Streams.readFully(inputStream));
    }
 }
diff --git
a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterMetadataManifest.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterMetadataManifest.java index f7024c8fde7ed..1dc56712d4ab5 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterMetadataManifest.java +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterMetadataManifest.java @@ -86,6 +86,11 @@ public BlobPathParameters getBlobPathParameters() { return new BlobPathParameters(List.of(MANIFEST), MANIFEST); } + @Override + public String getType() { + return MANIFEST; + } + @Override public String generateBlobFileName() { // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/manifest/manifest______C/P____ @@ -126,16 +131,17 @@ public ClusterMetadataManifest deserialize(final InputStream inputStream) throws return blobStoreFormat.deserialize(blobName, getNamedXContentRegistry(), Streams.readFully(inputStream)); } - private int getManifestCodecVersion() { + // package private for testing + int getManifestCodecVersion() { assert blobName != null; - String[] splitName = blobName.split(DELIMITER); + String[] splitName = getBlobFileName().split(DELIMITER); if (splitName.length == SPLITTED_MANIFEST_FILE_LENGTH) { return Integer.parseInt(splitName[splitName.length - 1]); // Last value would be codec version. } else if (splitName.length < SPLITTED_MANIFEST_FILE_LENGTH) { // Where codec is not part of file name, i.e. default codec version 0 // is used. return ClusterMetadataManifest.CODEC_V0; } else { - throw new IllegalArgumentException("Manifest file name is corrupted"); + throw new IllegalArgumentException("Manifest file name is corrupted : " + blobName); } } @@ -150,4 +156,5 @@ private ChecksumBlobStoreFormat getClusterMetadataManif } throw new IllegalArgumentException("Cluster metadata manifest file is corrupted, don't have valid codec version"); } + } diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterStateBlobStore.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterStateBlobStore.java index 83326f65f0d43..1dd23443f1252 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterStateBlobStore.java +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterStateBlobStore.java @@ -72,7 +72,9 @@ public void writeAsync(final U entity, final ActionListener listener) { public T read(final U entity) throws IOException { // TODO Add timing logs and tracing assert entity.getFullBlobName() != null; - return entity.deserialize(transferService.downloadBlob(getBlobPathForDownload(entity), entity.getBlobFileName())); + try (InputStream inputStream = transferService.downloadBlob(getBlobPathForDownload(entity), entity.getBlobFileName())) { + return entity.deserialize(inputStream); + } } @Override diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterStateCustoms.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterStateCustoms.java index 60a21c9b53148..f384908bc6b65 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterStateCustoms.java +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterStateCustoms.java @@ -11,22 +11,19 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterState.Custom; import org.opensearch.common.io.Streams; -import org.opensearch.common.io.stream.BytesStreamOutput; import 
org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; import org.opensearch.common.remote.BlobPathParameters; -import org.opensearch.core.common.io.stream.BytesStreamInput; -import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.compress.Compressor; import org.opensearch.gateway.remote.ClusterMetadataManifest; import org.opensearch.index.remote.RemoteStoreUtils; +import org.opensearch.repositories.blobstore.ChecksumWritableBlobStoreFormat; import java.io.IOException; import java.io.InputStream; import java.util.List; -import static org.opensearch.cluster.ClusterState.FeatureAware.shouldSerialize; -import static org.opensearch.core.common.bytes.BytesReference.toBytes; import static org.opensearch.gateway.remote.RemoteClusterStateAttributesManager.CLUSTER_STATE_ATTRIBUTES_CURRENT_CODEC_VERSION; import static org.opensearch.gateway.remote.RemoteClusterStateUtils.CLUSTER_STATE_EPHEMERAL_PATH_TOKEN; import static org.opensearch.gateway.remote.RemoteClusterStateUtils.CUSTOM_DELIMITER; @@ -39,9 +36,10 @@ public class RemoteClusterStateCustoms extends AbstractRemoteWritableBlobEntity< public static final String CLUSTER_STATE_CUSTOM = "cluster-state-custom"; private long stateVersion; - private String customType; + private final String customType; private ClusterState.Custom custom; private final NamedWriteableRegistry namedWriteableRegistry; + private final ChecksumWritableBlobStoreFormat clusterStateCustomsFormat; public RemoteClusterStateCustoms( final ClusterState.Custom custom, @@ -56,6 +54,10 @@ public RemoteClusterStateCustoms( this.customType = customType; this.custom = custom; this.namedWriteableRegistry = namedWriteableRegistry; + this.clusterStateCustomsFormat = new ChecksumWritableBlobStoreFormat<>( + "cluster-state-custom", + is -> readFrom(is, namedWriteableRegistry, customType) + ); } public RemoteClusterStateCustoms( @@ -69,6 +71,10 @@ public RemoteClusterStateCustoms( this.blobName = blobName; this.customType = customType; this.namedWriteableRegistry = namedWriteableRegistry; + this.clusterStateCustomsFormat = new ChecksumWritableBlobStoreFormat<>( + "cluster-state-custom", + is -> readFrom(is, namedWriteableRegistry, customType) + ); } @Override @@ -77,6 +83,11 @@ public BlobPathParameters getBlobPathParameters() { return new BlobPathParameters(List.of(CLUSTER_STATE_EPHEMERAL_PATH_TOKEN), prefix); } + @Override + public String getType() { + return CLUSTER_STATE_CUSTOM; + } + @Override public String generateBlobFileName() { // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/ephemeral/______ @@ -102,27 +113,16 @@ public ClusterMetadataManifest.UploadedMetadata getUploadedMetadata() { @Override public InputStream serialize() throws IOException { - try (BytesStreamOutput outputStream = new BytesStreamOutput()) { - if (shouldSerialize(outputStream, custom)) { - outputStream.writeNamedWriteable(custom); - } - return outputStream.bytes().streamInput(); - } catch (IOException e) { - throw new IOException("Failed to serialize cluster state custom of type " + customType, e); - } + return clusterStateCustomsFormat.serialize(custom, generateBlobFileName(), getCompressor()).streamInput(); } @Override public ClusterState.Custom deserialize(final InputStream inputStream) throws IOException { - try ( - NamedWriteableAwareStreamInput in = new NamedWriteableAwareStreamInput( - new 
BytesStreamInput(toBytes(Streams.readFully(inputStream))), - this.namedWriteableRegistry - ) - ) { - return in.readNamedWriteable(Custom.class); - } catch (IOException e) { - throw new IOException("Failed to deserialize cluster state custom of type " + customType, e); - } + return clusterStateCustomsFormat.deserialize(blobName, Streams.readFully(inputStream)); + } + + public static ClusterState.Custom readFrom(StreamInput streamInput, NamedWriteableRegistry namedWriteableRegistry, String customType) + throws IOException { + return namedWriteableRegistry.getReader(ClusterState.Custom.class, customType).read(streamInput); } } diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteCoordinationMetadata.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteCoordinationMetadata.java index 95ad7f7724a08..a90721ab59f66 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteCoordinationMetadata.java +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteCoordinationMetadata.java @@ -70,6 +70,11 @@ public BlobPathParameters getBlobPathParameters() { return new BlobPathParameters(List.of("global-metadata"), COORDINATION_METADATA); } + @Override + public String getType() { + return COORDINATION_METADATA; + } + @Override public String generateBlobFileName() { // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/global-metadata/______ diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteCustomMetadata.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteCustomMetadata.java index 682cac4b39d10..4c7069ee8be9e 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteCustomMetadata.java +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteCustomMetadata.java @@ -8,18 +8,17 @@ package org.opensearch.gateway.remote.model; -import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.metadata.Metadata.Custom; import org.opensearch.common.io.Streams; import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; import org.opensearch.common.remote.BlobPathParameters; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.compress.Compressor; -import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedMetadata; import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedMetadataAttribute; -import org.opensearch.gateway.remote.RemoteClusterStateUtils; import org.opensearch.index.remote.RemoteStoreUtils; -import org.opensearch.repositories.blobstore.ChecksumBlobStoreFormat; +import org.opensearch.repositories.blobstore.ChecksumWritableBlobStoreFormat; import java.io.IOException; import java.io.InputStream; @@ -28,7 +27,6 @@ import static org.opensearch.gateway.remote.RemoteClusterStateUtils.DELIMITER; import static org.opensearch.gateway.remote.RemoteClusterStateUtils.GLOBAL_METADATA_CURRENT_CODEC_VERSION; import static org.opensearch.gateway.remote.RemoteClusterStateUtils.GLOBAL_METADATA_PATH_TOKEN; -import static org.opensearch.gateway.remote.RemoteClusterStateUtils.METADATA_NAME_PLAIN_FORMAT; /** * Wrapper class for uploading/downloading {@link Custom} to/from remote blob store @@ -37,11 +35,12 @@ public class RemoteCustomMetadata extends AbstractRemoteWritableBlobEntity customBlobStoreFormat; + public final ChecksumWritableBlobStoreFormat customBlobStoreFormat; 
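+    // Customs are now read and written with the binary writeable protocol: the format's reader
+    // resolves the concrete Custom implementation through the NamedWriteableRegistry using the
+    // customType key, replacing the previous XContent-based ChecksumBlobStoreFormat parsing.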
private Custom custom; private final String customType; private long metadataVersion; + private final NamedWriteableRegistry namedWriteableRegistry; public RemoteCustomMetadata( final Custom custom, @@ -49,16 +48,16 @@ public RemoteCustomMetadata( final long metadataVersion, final String clusterUUID, Compressor compressor, - NamedXContentRegistry namedXContentRegistry + NamedWriteableRegistry namedWriteableRegistry ) { - super(clusterUUID, compressor, namedXContentRegistry); + super(clusterUUID, compressor, null); this.custom = custom; this.customType = customType; this.metadataVersion = metadataVersion; - this.customBlobStoreFormat = new ChecksumBlobStoreFormat<>( + this.namedWriteableRegistry = namedWriteableRegistry; + this.customBlobStoreFormat = new ChecksumWritableBlobStoreFormat<>( "custom", - METADATA_NAME_PLAIN_FORMAT, - (parser -> Metadata.Custom.fromXContent(parser, customType)) + is -> readFrom(is, namedWriteableRegistry, customType) ); } @@ -67,15 +66,15 @@ public RemoteCustomMetadata( final String customType, final String clusterUUID, final Compressor compressor, - final NamedXContentRegistry namedXContentRegistry + final NamedWriteableRegistry namedWriteableRegistry ) { - super(clusterUUID, compressor, namedXContentRegistry); + super(clusterUUID, compressor, null); this.blobName = blobName; this.customType = customType; - this.customBlobStoreFormat = new ChecksumBlobStoreFormat<>( + this.namedWriteableRegistry = namedWriteableRegistry; + this.customBlobStoreFormat = new ChecksumWritableBlobStoreFormat<>( "custom", - METADATA_NAME_PLAIN_FORMAT, - (parser -> Metadata.Custom.fromXContent(parser, customType)) + is -> readFrom(is, namedWriteableRegistry, customType) ); } @@ -85,6 +84,11 @@ public BlobPathParameters getBlobPathParameters() { return new BlobPathParameters(List.of(GLOBAL_METADATA_PATH_TOKEN), prefix); } + @Override + public String getType() { + return CUSTOM_METADATA; + } + @Override public String generateBlobFileName() { // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/global-metadata/______ @@ -102,13 +106,12 @@ public String generateBlobFileName() { @Override public InputStream serialize() throws IOException { - return customBlobStoreFormat.serialize(custom, generateBlobFileName(), getCompressor(), RemoteClusterStateUtils.FORMAT_PARAMS) - .streamInput(); + return customBlobStoreFormat.serialize(custom, generateBlobFileName(), getCompressor()).streamInput(); } @Override public Custom deserialize(final InputStream inputStream) throws IOException { - return customBlobStoreFormat.deserialize(blobName, getNamedXContentRegistry(), Streams.readFully(inputStream)); + return customBlobStoreFormat.deserialize(blobName, Streams.readFully(inputStream)); } @Override @@ -116,4 +119,9 @@ public UploadedMetadata getUploadedMetadata() { assert blobName != null; return new UploadedMetadataAttribute(String.join(CUSTOM_DELIMITER, CUSTOM_METADATA, customType), blobName); } + + public static Custom readFrom(StreamInput streamInput, NamedWriteableRegistry namedWriteableRegistry, String customType) + throws IOException { + return namedWriteableRegistry.getReader(Custom.class, customType).read(streamInput); + } } diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteDiscoveryNodes.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteDiscoveryNodes.java index 7dc2b6492de7e..fb399e2899cdd 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteDiscoveryNodes.java +++ 
b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteDiscoveryNodes.java @@ -10,21 +10,18 @@ import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.common.io.Streams; -import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; import org.opensearch.common.remote.BlobPathParameters; -import org.opensearch.core.common.io.stream.BytesStreamInput; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.compress.Compressor; import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedMetadata; import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedMetadataAttribute; import org.opensearch.index.remote.RemoteStoreUtils; +import org.opensearch.repositories.blobstore.ChecksumWritableBlobStoreFormat; import java.io.IOException; import java.io.InputStream; import java.util.List; -import static org.opensearch.core.common.bytes.BytesReference.toBytes; import static org.opensearch.gateway.remote.RemoteClusterStateAttributesManager.CLUSTER_STATE_ATTRIBUTES_CURRENT_CODEC_VERSION; import static org.opensearch.gateway.remote.RemoteClusterStateUtils.CLUSTER_STATE_EPHEMERAL_PATH_TOKEN; import static org.opensearch.gateway.remote.RemoteClusterStateUtils.DELIMITER; @@ -35,6 +32,10 @@ public class RemoteDiscoveryNodes extends AbstractRemoteWritableBlobEntity { public static final String DISCOVERY_NODES = "nodes"; + public static final ChecksumWritableBlobStoreFormat DISCOVERY_NODES_FORMAT = new ChecksumWritableBlobStoreFormat<>( + "nodes", + is -> DiscoveryNodes.readFrom(is, null) + ); private DiscoveryNodes discoveryNodes; private long stateVersion; @@ -60,6 +61,11 @@ public BlobPathParameters getBlobPathParameters() { return new BlobPathParameters(List.of(CLUSTER_STATE_EPHEMERAL_PATH_TOKEN), DISCOVERY_NODES); } + @Override + public String getType() { + return DISCOVERY_NODES; + } + @Override public String generateBlobFileName() { // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/ephemeral/______ @@ -82,20 +88,11 @@ public UploadedMetadata getUploadedMetadata() { @Override public InputStream serialize() throws IOException { - try (BytesStreamOutput outputStream = new BytesStreamOutput()) { - discoveryNodes.writeTo(outputStream); - return outputStream.bytes().streamInput(); - } catch (IOException e) { - throw new IOException("Failed to serialize remote discovery nodes", e); - } + return DISCOVERY_NODES_FORMAT.serialize(discoveryNodes, generateBlobFileName(), getCompressor()).streamInput(); } @Override public DiscoveryNodes deserialize(final InputStream inputStream) throws IOException { - try (StreamInput streamInput = new BytesStreamInput(toBytes(Streams.readFully(inputStream)))) { - return DiscoveryNodes.readFrom(streamInput, null); - } catch (IOException e) { - throw new IOException("Failed to deserialize remote discovery nodes", e); - } + return DISCOVERY_NODES_FORMAT.deserialize(blobName, Streams.readFully(inputStream)); } } diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteGlobalMetadata.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteGlobalMetadata.java index 8e41b155ecb93..09f07de0d5c24 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteGlobalMetadata.java +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteGlobalMetadata.java @@ -26,6 +26,7 @@ * Wrapper class for uploading/downloading global metadata ({@link Metadata}) to/from remote blob store */ public class 
RemoteGlobalMetadata extends AbstractRemoteWritableBlobEntity { + public static final String GLOBAL_METADATA = "global_metadata"; public static final ChecksumBlobStoreFormat GLOBAL_METADATA_FORMAT = new ChecksumBlobStoreFormat<>( "metadata", @@ -48,6 +49,11 @@ public BlobPathParameters getBlobPathParameters() { throw new UnsupportedOperationException(); } + @Override + public String getType() { + return GLOBAL_METADATA; + } + @Override public String generateBlobFileName() { throw new UnsupportedOperationException(); diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteHashesOfConsistentSettings.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteHashesOfConsistentSettings.java index a70506bcd6846..1debf75cdfec9 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteHashesOfConsistentSettings.java +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteHashesOfConsistentSettings.java @@ -10,20 +10,17 @@ import org.opensearch.cluster.metadata.DiffableStringMap; import org.opensearch.common.io.Streams; -import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; import org.opensearch.common.remote.BlobPathParameters; -import org.opensearch.core.common.io.stream.BytesStreamInput; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.compress.Compressor; import org.opensearch.gateway.remote.ClusterMetadataManifest; import org.opensearch.index.remote.RemoteStoreUtils; +import org.opensearch.repositories.blobstore.ChecksumWritableBlobStoreFormat; import java.io.IOException; import java.io.InputStream; import java.util.List; -import static org.opensearch.core.common.bytes.BytesReference.toBytes; import static org.opensearch.gateway.remote.RemoteClusterStateAttributesManager.CLUSTER_STATE_ATTRIBUTES_CURRENT_CODEC_VERSION; import static org.opensearch.gateway.remote.RemoteClusterStateUtils.DELIMITER; import static org.opensearch.gateway.remote.RemoteClusterStateUtils.GLOBAL_METADATA_PATH_TOKEN; @@ -33,6 +30,8 @@ */ public class RemoteHashesOfConsistentSettings extends AbstractRemoteWritableBlobEntity { public static final String HASHES_OF_CONSISTENT_SETTINGS = "hashes-of-consistent-settings"; + public static final ChecksumWritableBlobStoreFormat HASHES_OF_CONSISTENT_SETTINGS_FORMAT = + new ChecksumWritableBlobStoreFormat<>("hashes-of-consistent-settings", DiffableStringMap::readFrom); private DiffableStringMap hashesOfConsistentSettings; private long metadataVersion; @@ -58,6 +57,11 @@ public BlobPathParameters getBlobPathParameters() { return new BlobPathParameters(List.of(GLOBAL_METADATA_PATH_TOKEN), HASHES_OF_CONSISTENT_SETTINGS); } + @Override + public String getType() { + return HASHES_OF_CONSISTENT_SETTINGS; + } + @Override public String generateBlobFileName() { String blobFileName = String.join( @@ -79,21 +83,12 @@ public ClusterMetadataManifest.UploadedMetadata getUploadedMetadata() { @Override public InputStream serialize() throws IOException { - try (BytesStreamOutput bytesStreamOutput = new BytesStreamOutput()) { - hashesOfConsistentSettings.writeTo(bytesStreamOutput); - return bytesStreamOutput.bytes().streamInput(); - } catch (IOException e) { - throw new IOException("Failed to serialize hashes of consistent settings", e); - } - + return HASHES_OF_CONSISTENT_SETTINGS_FORMAT.serialize(hashesOfConsistentSettings, generateBlobFileName(), getCompressor()) + .streamInput(); } @Override public DiffableStringMap 
deserialize(final InputStream inputStream) throws IOException { - try (StreamInput in = new BytesStreamInput(toBytes(Streams.readFully(inputStream)))) { - return DiffableStringMap.readFrom(in); - } catch (IOException e) { - throw new IOException("Failed to deserialize hashes of consistent settings", e); - } + return HASHES_OF_CONSISTENT_SETTINGS_FORMAT.deserialize(blobName, Streams.readFully(inputStream)); } } diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteIndexMetadata.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteIndexMetadata.java index 0966a7b09fe17..830b09b92e2cb 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteIndexMetadata.java +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteIndexMetadata.java @@ -31,14 +31,14 @@ */ public class RemoteIndexMetadata extends AbstractRemoteWritableBlobEntity { - public static final int INDEX_METADATA_CURRENT_CODEC_VERSION = 1; + public static final int INDEX_METADATA_CURRENT_CODEC_VERSION = 2; public static final ChecksumBlobStoreFormat INDEX_METADATA_FORMAT = new ChecksumBlobStoreFormat<>( "index-metadata", METADATA_NAME_PLAIN_FORMAT, IndexMetadata::fromXContent ); - public static final String INDEX_PATH_TOKEN = "index"; + public static final String INDEX = "index"; private IndexMetadata indexMetadata; @@ -64,7 +64,12 @@ public RemoteIndexMetadata( @Override public BlobPathParameters getBlobPathParameters() { - return new BlobPathParameters(List.of(INDEX_PATH_TOKEN, indexMetadata.getIndexUUID()), "metadata"); + return new BlobPathParameters(List.of(INDEX, indexMetadata.getIndexUUID()), "metadata"); + } + + @Override + public String getType() { + return INDEX; } @Override diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemotePersistentSettingsMetadata.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemotePersistentSettingsMetadata.java index e7604f7b62e72..9ee3db8f289e3 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/model/RemotePersistentSettingsMetadata.java +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemotePersistentSettingsMetadata.java @@ -26,7 +26,7 @@ import static org.opensearch.gateway.remote.RemoteClusterStateUtils.DELIMITER; import static org.opensearch.gateway.remote.RemoteClusterStateUtils.GLOBAL_METADATA_CURRENT_CODEC_VERSION; -import static org.opensearch.gateway.remote.RemoteClusterStateUtils.METADATA_NAME_FORMAT; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.METADATA_NAME_PLAIN_FORMAT; /** * Wrapper class for uploading/downloading persistent {@link Settings} to/from remote blob store @@ -37,7 +37,7 @@ public class RemotePersistentSettingsMetadata extends AbstractRemoteWritableBlob public static final ChecksumBlobStoreFormat SETTINGS_METADATA_FORMAT = new ChecksumBlobStoreFormat<>( "settings", - METADATA_NAME_FORMAT, + METADATA_NAME_PLAIN_FORMAT, Settings::fromXContent ); @@ -71,6 +71,11 @@ public BlobPathParameters getBlobPathParameters() { return new BlobPathParameters(List.of("global-metadata"), SETTING_METADATA); } + @Override + public String getType() { + return SETTING_METADATA; + } + @Override public String generateBlobFileName() { String blobFileName = String.join( diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteReadResult.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteReadResult.java index adee09eaeffef..06d3b88ae1ecf 100644 --- 
a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteReadResult.java +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteReadResult.java @@ -8,24 +8,22 @@ package org.opensearch.gateway.remote.model; -import org.opensearch.core.xcontent.ToXContent; - /** * Container class for entity read from remote store */ public class RemoteReadResult { - ToXContent obj; + Object obj; String component; String componentName; - public RemoteReadResult(ToXContent obj, String component, String componentName) { + public RemoteReadResult(Object obj, String component, String componentName) { this.obj = obj; this.component = component; this.componentName = componentName; } - public ToXContent getObj() { + public Object getObj() { return obj; } diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteTemplatesMetadata.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteTemplatesMetadata.java index bbce063a5a0f0..4513d35aef5e8 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteTemplatesMetadata.java +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteTemplatesMetadata.java @@ -26,7 +26,7 @@ import static org.opensearch.gateway.remote.RemoteClusterStateUtils.DELIMITER; import static org.opensearch.gateway.remote.RemoteClusterStateUtils.GLOBAL_METADATA_CURRENT_CODEC_VERSION; -import static org.opensearch.gateway.remote.RemoteClusterStateUtils.METADATA_NAME_FORMAT; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.METADATA_NAME_PLAIN_FORMAT; /** * Wrapper class for uploading/downloading {@link TemplatesMetadata} to/from remote blob store @@ -37,7 +37,7 @@ public class RemoteTemplatesMetadata extends AbstractRemoteWritableBlobEntity TEMPLATES_METADATA_FORMAT = new ChecksumBlobStoreFormat<>( "templates", - METADATA_NAME_FORMAT, + METADATA_NAME_PLAIN_FORMAT, TemplatesMetadata::fromXContent ); private TemplatesMetadata templatesMetadata; @@ -70,6 +70,11 @@ public BlobPathParameters getBlobPathParameters() { return new BlobPathParameters(List.of("global-metadata"), TEMPLATES_METADATA); } + @Override + public String getType() { + return TEMPLATES_METADATA; + } + @Override public String generateBlobFileName() { // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/global-metadata/______ diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteTransientSettingsMetadata.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteTransientSettingsMetadata.java index fe32b95f5e957..fd0526f05d015 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteTransientSettingsMetadata.java +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteTransientSettingsMetadata.java @@ -72,6 +72,11 @@ public BlobPathParameters getBlobPathParameters() { return new BlobPathParameters(List.of(GLOBAL_METADATA_PATH_TOKEN), TRANSIENT_SETTING_METADATA); } + @Override + public String getType() { + return TRANSIENT_SETTING_METADATA; + } + @Override public String generateBlobFileName() { String blobFileName = String.join( diff --git a/server/src/main/java/org/opensearch/index/query/SourceFieldMatchQuery.java b/server/src/main/java/org/opensearch/index/query/SourceFieldMatchQuery.java index b0be20e417efe..e9abcb698f68f 100644 --- a/server/src/main/java/org/opensearch/index/query/SourceFieldMatchQuery.java +++ b/server/src/main/java/org/opensearch/index/query/SourceFieldMatchQuery.java @@ -73,7 +73,7 @@ public void visit(QueryVisitor visitor) { @Override public Query 
rewrite(IndexSearcher indexSearcher) throws IOException { - Query rewritten = indexSearcher.rewrite(delegateQuery); + Query rewritten = delegateQuery.rewrite(indexSearcher); if (rewritten == delegateQuery) { return this; } diff --git a/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java b/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java index fe90f24b0f544..d3c6fc9d1f3bf 100644 --- a/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java +++ b/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java @@ -152,7 +152,11 @@ public RemoteRestoreResult restore( throw new IllegalArgumentException("clusterUUID to restore from should be different from current cluster UUID"); } logger.info("Restoring cluster state from remote store from cluster UUID : [{}]", restoreClusterUUID); - remoteState = remoteClusterStateService.getLatestClusterState(currentState.getClusterName().value(), restoreClusterUUID); + remoteState = remoteClusterStateService.getLatestClusterState( + currentState.getClusterName().value(), + restoreClusterUUID, + false + ); remoteState.getMetadata().getIndices().values().forEach(indexMetadata -> { indexMetadataMap.put(indexMetadata.getIndex().getName(), new Tuple<>(true, indexMetadata)); }); diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteIndexPathUploader.java b/server/src/main/java/org/opensearch/index/remote/RemoteIndexPathUploader.java index d736a82d57a7c..d5617bdfd94a7 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteIndexPathUploader.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteIndexPathUploader.java @@ -23,7 +23,7 @@ import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.Index; import org.opensearch.gateway.remote.IndexMetadataUploadListener; -import org.opensearch.gateway.remote.RemoteClusterStateService.RemoteStateTransferException; +import org.opensearch.gateway.remote.RemoteStateTransferException; import org.opensearch.index.remote.RemoteStoreEnums.PathType; import org.opensearch.node.Node; import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; @@ -45,7 +45,7 @@ import java.util.function.Supplier; import java.util.stream.Collectors; -import static org.opensearch.gateway.remote.RemoteClusterStateService.INDEX_METADATA_UPLOAD_TIMEOUT_SETTING; +import static org.opensearch.gateway.remote.RemoteGlobalMetadataManager.GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING; import static org.opensearch.index.remote.RemoteIndexPath.COMBINED_PATH; import static org.opensearch.index.remote.RemoteIndexPath.SEGMENT_PATH; import static org.opensearch.index.remote.RemoteIndexPath.TRANSLOG_PATH; @@ -80,7 +80,7 @@ public class RemoteIndexPathUploader extends IndexMetadataUploadListener { private final boolean isRemoteDataAttributePresent; private final boolean isTranslogSegmentRepoSame; private final Supplier repositoriesService; - private volatile TimeValue indexMetadataUploadTimeout; + private volatile TimeValue metadataUploadTimeout; private BlobStoreRepository translogRepository; private BlobStoreRepository segmentRepository; @@ -98,8 +98,8 @@ public RemoteIndexPathUploader( // If the remote data attributes are not present, then there is no effect of translog and segment being same or different or null. 
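The `SourceFieldMatchQuery` fix above replaces `indexSearcher.rewrite(delegateQuery)`, which rewrites to a fixed point, with `delegateQuery.rewrite(indexSearcher)`, a single rewrite step. A single step is what the surrounding identity check expects: Lucene's caller re-invokes `rewrite` until the query stops changing, so each call should advance the delegate once and re-wrap. A sketch of the idiom under that assumption; `WrappingQuery` and `delegate` are hypothetical names, and the rest of the class is elided.

```java
// Fragment of a hypothetical delegating query showing the single-step rewrite idiom.
@Override
public Query rewrite(IndexSearcher indexSearcher) throws IOException {
    Query rewritten = delegate.rewrite(indexSearcher); // one step, not a fixed-point loop
    if (rewritten == delegate) {
        return this; // delegate is stable: keep this wrapper unchanged
    }
    return new WrappingQuery(rewritten); // re-wrap; the caller will invoke rewrite() again
}
```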
isTranslogSegmentRepoSame = isTranslogSegmentRepoSame(); Objects.requireNonNull(clusterSettings); - indexMetadataUploadTimeout = clusterSettings.get(INDEX_METADATA_UPLOAD_TIMEOUT_SETTING); - clusterSettings.addSettingsUpdateConsumer(INDEX_METADATA_UPLOAD_TIMEOUT_SETTING, this::setIndexMetadataUploadTimeout); + metadataUploadTimeout = clusterSettings.get(GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING); + clusterSettings.addSettingsUpdateConsumer(GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING, this::setMetadataUploadTimeout); } @Override @@ -131,7 +131,7 @@ protected void doOnUpload( logger.trace(new ParameterizedMessage("Remote index path upload started for {}", indexNames)); try { - if (latch.await(indexMetadataUploadTimeout.millis(), TimeUnit.MILLISECONDS) == false) { + if (latch.await(metadataUploadTimeout.millis(), TimeUnit.MILLISECONDS) == false) { RemoteStateTransferException ex = new RemoteStateTransferException( String.format(Locale.ROOT, TIMEOUT_EXCEPTION_MSG, indexNames) ); @@ -289,8 +289,8 @@ private boolean requiresPathUpload(IndexMetadata indexMetadata, IndexMetadata pr return pathType == PathType.HASHED_PREFIX && (Objects.isNull(prevPathType) || prevPathType != PathType.HASHED_PREFIX); } - private void setIndexMetadataUploadTimeout(TimeValue newIndexMetadataUploadTimeout) { - this.indexMetadataUploadTimeout = newIndexMetadataUploadTimeout; + private void setMetadataUploadTimeout(TimeValue newIndexMetadataUploadTimeout) { + this.metadataUploadTimeout = newIndexMetadataUploadTimeout; } /** diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 49cb710c915fc..82b68b32f3bf8 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -2146,7 +2146,7 @@ public void waitForRemoteStoreSync(Runnable onProgress) throws IOException { segmentUploadeCount = directory.getSegmentsUploadedToRemoteStore().size(); } try { - Thread.sleep(TimeValue.timeValueSeconds(30).seconds()); + Thread.sleep(TimeValue.timeValueSeconds(30).millis()); } catch (InterruptedException ie) { throw new OpenSearchException("Interrupted waiting for completion of [{}]", ie); } diff --git a/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java b/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java index 57f7e402536f2..06cd77a34fe0b 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java +++ b/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java @@ -75,7 +75,6 @@ import java.io.IOException; import java.util.Collection; import java.util.Collections; -import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.List; @@ -153,7 +152,8 @@ public final class IndicesRequestCache implements RemovalListener cache; private final ClusterService clusterService; - private final Function> cacheEntityLookup; + // pkg-private for testing + final Function> cacheEntityLookup; // pkg-private for testing final IndicesRequestCacheCleanupManager cacheCleanupManager; @@ -506,7 +506,7 @@ public int hashCode() { * */ class IndicesRequestCacheCleanupManager implements Closeable { private final Set keysToClean; - private final ConcurrentMap> cleanupKeyToCountMap; + private final ConcurrentMap> cleanupKeyToCountMap; private final AtomicInteger staleKeysCount; private volatile double stalenessThreshold; private final IndicesRequestCacheCleaner cacheCleaner; @@ -567,7 +567,13 @@ 
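The `IndexShard` change above fixes a unit bug: `TimeValue.timeValueSeconds(30).seconds()` returns `30`, so `Thread.sleep` paused for roughly 30 milliseconds instead of the intended 30 seconds. A small self-contained illustration of the two accessors:

```java
import org.opensearch.common.unit.TimeValue;

public final class SleepUnitsDemo {
    public static void main(String[] args) throws InterruptedException {
        TimeValue interval = TimeValue.timeValueSeconds(30);
        System.out.println(interval.seconds()); // 30    -> Thread.sleep(30) pauses ~30 ms
        System.out.println(interval.millis());  // 30000 -> Thread.sleep(30000) pauses 30 s
        Thread.sleep(interval.millis());        // the corrected call
    }
}
```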
private void updateStaleCountOnCacheInsert(CleanupKey cleanupKey) { // If the key doesn't exist, it's added with a value of 1. // If the key exists, its value is incremented by 1. - cleanupKeyToCountMap.computeIfAbsent(shardId, k -> new HashMap<>()).merge(cleanupKey.readerCacheKeyId, 1, Integer::sum); + addToCleanupKeyToCountMap(shardId, cleanupKey.readerCacheKeyId); + } + + // pkg-private for testing + void addToCleanupKeyToCountMap(ShardId shardId, String readerCacheKeyId) { + cleanupKeyToCountMap.computeIfAbsent(shardId, k -> ConcurrentCollections.newConcurrentMap()) + .merge(readerCacheKeyId, 1, Integer::sum); } /** @@ -825,7 +831,7 @@ public void close() { } // for testing - ConcurrentMap> getCleanupKeyToCountMap() { + ConcurrentMap> getCleanupKeyToCountMap() { return cleanupKeyToCountMap; } diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index 251be8a990055..73b4cf3259d5b 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -404,7 +404,7 @@ public IndicesService( if (indexService == null) { return Optional.empty(); } - return Optional.of(new IndexShardCacheEntity(indexService.getShard(shardId.id()))); + return Optional.of(new IndexShardCacheEntity(indexService.getShardOrNull(shardId.id()))); }), cacheService, threadPool, clusterService); this.indicesQueryCache = new IndicesQueryCache(settings); this.mapperRegistry = mapperRegistry; diff --git a/server/src/main/java/org/opensearch/ingest/IngestService.java b/server/src/main/java/org/opensearch/ingest/IngestService.java index ab8e823199447..2281ccd4c0382 100644 --- a/server/src/main/java/org/opensearch/ingest/IngestService.java +++ b/server/src/main/java/org/opensearch/ingest/IngestService.java @@ -775,7 +775,7 @@ private void executePipelinesInBatchRequests( ), results.get(i).getException() ); - onFailure.accept(slots.get(i), results.get(i).getException()); + onFailure.accept(results.get(i).getSlot(), results.get(i).getException()); } } @@ -1092,15 +1092,15 @@ private void innerBatchExecute( } if (!exceptions.isEmpty()) { totalMetrics.failedN(exceptions.size()); - } else if (!dropped.isEmpty()) { + } + if (!dropped.isEmpty()) { dropped.forEach(t -> itemDroppedHandler.accept(t.getSlot())); - } else { - for (IngestDocumentWrapper ingestDocumentWrapper : succeeded) { - updateIndexRequestWithIngestDocument( - slotToindexRequestMap.get(ingestDocumentWrapper.getSlot()), - ingestDocumentWrapper.getIngestDocument() - ); - } + } + for (IngestDocumentWrapper ingestDocumentWrapper : succeeded) { + updateIndexRequestWithIngestDocument( + slotToindexRequestMap.get(ingestDocumentWrapper.getSlot()), + ingestDocumentWrapper.getIngestDocument() + ); } handler.accept(allResults); } diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index fd6621fb8f24d..4d143fa3d62bc 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -770,7 +770,8 @@ protected Node( clusterService, threadPool::preciseRelativeTimeInNanos, threadPool, - List.of(remoteIndexPathUploader) + List.of(remoteIndexPathUploader), + namedWriteableRegistry ); remoteClusterStateCleanupManager = remoteClusterStateService.getCleanupManager(); } else { diff --git a/server/src/main/java/org/opensearch/search/DefaultSearchContext.java 
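The `IndicesRequestCache` hunk above swaps the inner `HashMap` for a concurrent map because `computeIfAbsent(...).merge(...)` is only atomic when both map levels are concurrent; two threads racing on a plain inner `HashMap` can lose increments or corrupt the map. A minimal sketch of the resulting pattern, using OpenSearch's `ConcurrentCollections` helper as the diff does (the `increment` wrapper is illustrative):

```java
import org.opensearch.common.util.concurrent.ConcurrentCollections;
import org.opensearch.core.index.shard.ShardId;

import java.util.concurrent.ConcurrentMap;

class CleanupCountSketch {
    // Both map levels are concurrent, so merge() gives an atomic increment-or-insert.
    private final ConcurrentMap<ShardId, ConcurrentMap<String, Integer>> cleanupKeyToCountMap =
        ConcurrentCollections.newConcurrentMap();

    void increment(ShardId shardId, String readerCacheKeyId) {
        cleanupKeyToCountMap.computeIfAbsent(shardId, k -> ConcurrentCollections.newConcurrentMap())
            .merge(readerCacheKeyId, 1, Integer::sum);
    }
}
```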
b/server/src/main/java/org/opensearch/search/DefaultSearchContext.java index cd8714f6b556a..abb968c2de245 100644 --- a/server/src/main/java/org/opensearch/search/DefaultSearchContext.java +++ b/server/src/main/java/org/opensearch/search/DefaultSearchContext.java @@ -106,6 +106,7 @@ import java.util.function.Function; import java.util.function.LongSupplier; +import static org.opensearch.search.SearchService.CARDINALITY_AGGREGATION_PRUNING_THRESHOLD; import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.SearchService.MAX_AGGREGATION_REWRITE_FILTERS; @@ -189,6 +190,7 @@ final class DefaultSearchContext extends SearchContext { private final boolean concurrentSearchSettingsEnabled; private final SetOnce requestShouldUseConcurrentSearch = new SetOnce<>(); private final int maxAggRewriteFilters; + private final int cardinalityAggregationPruningThreshold; DefaultSearchContext( ReaderContext readerContext, @@ -244,6 +246,7 @@ final class DefaultSearchContext extends SearchContext { this.requestToAggReduceContextBuilder = requestToAggReduceContextBuilder; this.maxAggRewriteFilters = evaluateFilterRewriteSetting(); + this.cardinalityAggregationPruningThreshold = evaluateCardinalityAggregationPruningThreshold(); } @Override @@ -1010,4 +1013,16 @@ private int evaluateFilterRewriteSetting() { } return 0; } + + @Override + public int cardinalityAggregationPruningThreshold() { + return cardinalityAggregationPruningThreshold; + } + + private int evaluateCardinalityAggregationPruningThreshold() { + if (clusterService != null) { + return clusterService.getClusterSettings().get(CARDINALITY_AGGREGATION_PRUNING_THRESHOLD); + } + return 0; + } } diff --git a/server/src/main/java/org/opensearch/search/SearchService.java b/server/src/main/java/org/opensearch/search/SearchService.java index 45f111d889522..135af91912e5d 100644 --- a/server/src/main/java/org/opensearch/search/SearchService.java +++ b/server/src/main/java/org/opensearch/search/SearchService.java @@ -288,6 +288,15 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv Property.NodeScope ); + // value 0 can disable dynamic pruning optimization in cardinality aggregation + public static final Setting CARDINALITY_AGGREGATION_PRUNING_THRESHOLD = Setting.intSetting( + "search.dynamic_pruning.cardinality_aggregation.max_allowed_cardinality", + 100, + 0, + Property.Dynamic, + Property.NodeScope + ); + public static final int DEFAULT_SIZE = 10; public static final int DEFAULT_FROM = 0; diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregator.java index 99c4eaac4b777..0f3d975960364 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregator.java @@ -32,13 +32,28 @@ package org.opensearch.search.aggregations.metrics; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.index.SortedSetDocValues; +import org.apache.lucene.index.Term; +import org.apache.lucene.index.Terms; +import org.apache.lucene.index.TermsEnum; +import org.apache.lucene.search.BulkScorer; +import org.apache.lucene.search.CollectionTerminatedException; +import 
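The `CARDINALITY_AGGREGATION_PRUNING_THRESHOLD` setting registered above is dynamic, so operators can raise it or disable the optimization (value `0`) without a restart. A hedged usage sketch through the cluster-settings API; `client` is assumed to be an initialized `org.opensearch.client.Client`:

```java
import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
import org.opensearch.client.Client;
import org.opensearch.common.settings.Settings;

class PruningSettingSketch {
    // Disables dynamic pruning for cardinality aggregations cluster-wide.
    static void disablePruning(Client client) {
        ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest();
        request.persistentSettings(
            Settings.builder().put("search.dynamic_pruning.cardinality_aggregation.max_allowed_cardinality", 0)
        );
        client.admin().cluster().updateSettings(request).actionGet();
    }
}
```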
org.apache.lucene.search.DisiPriorityQueue; +import org.apache.lucene.search.DisiWrapper; +import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.Weight; +import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.FixedBitSet; import org.apache.lucene.util.RamUsageEstimator; +import org.opensearch.OpenSearchStatusException; import org.opensearch.common.Nullable; import org.opensearch.common.hash.MurmurHash3; import org.opensearch.common.lease.Releasable; @@ -48,6 +63,7 @@ import org.opensearch.common.util.BitMixer; import org.opensearch.common.util.LongArray; import org.opensearch.common.util.ObjectArray; +import org.opensearch.core.rest.RestStatus; import org.opensearch.index.fielddata.SortedBinaryDocValues; import org.opensearch.index.fielddata.SortedNumericDoubleValues; import org.opensearch.search.aggregations.Aggregator; @@ -58,9 +74,12 @@ import org.opensearch.search.internal.SearchContext; import java.io.IOException; +import java.util.HashMap; import java.util.Map; import java.util.function.BiConsumer; +import static org.opensearch.search.SearchService.CARDINALITY_AGGREGATION_PRUNING_THRESHOLD; + /** * An aggregator that computes approximate counts of unique values. * @@ -68,9 +87,13 @@ */ public class CardinalityAggregator extends NumericMetricsAggregator.SingleValue { + private static final Logger logger = LogManager.getLogger(CardinalityAggregator.class); + private final int precision; private final ValuesSource valuesSource; + private final ValuesSourceConfig valuesSourceConfig; + // Expensive to initialize, so we only initialize it when we have an actual value source @Nullable private HyperLogLogPlusPlus counts; @@ -82,6 +105,7 @@ public class CardinalityAggregator extends NumericMetricsAggregator.SingleValue private int ordinalsCollectorsUsed; private int ordinalsCollectorsOverheadTooHigh; private int stringHashingCollectorsUsed; + private int dynamicPrunedSegments; public CardinalityAggregator( String name, @@ -96,6 +120,7 @@ public CardinalityAggregator( this.valuesSource = valuesSourceConfig.hasValues() ? valuesSourceConfig.getValuesSource() : null; this.precision = precision; this.counts = valuesSource == null ? 
null : new HyperLogLogPlusPlus(precision, context.bigArrays(), 1); + this.valuesSourceConfig = valuesSourceConfig; } @Override @@ -118,6 +143,7 @@ private Collector pickCollector(LeafReaderContext ctx) throws IOException { return new DirectCollector(counts, hashValues); } + Collector collector = null; if (valuesSource instanceof ValuesSource.Bytes.WithOrdinals) { ValuesSource.Bytes.WithOrdinals source = (ValuesSource.Bytes.WithOrdinals) valuesSource; final SortedSetDocValues ordinalValues = source.ordinalsValues(ctx); @@ -125,20 +151,109 @@ private Collector pickCollector(LeafReaderContext ctx) throws IOException { if (maxOrd == 0) { emptyCollectorsUsed++; return new EmptyCollector(); + } else { + final long ordinalsMemoryUsage = OrdinalsCollector.memoryOverhead(maxOrd); + final long countsMemoryUsage = HyperLogLogPlusPlus.memoryUsage(precision); + // only use ordinals if they don't increase memory usage by more than 25% + if (ordinalsMemoryUsage < countsMemoryUsage / 4) { + ordinalsCollectorsUsed++; + collector = new OrdinalsCollector(counts, ordinalValues, context.bigArrays()); + } else { + ordinalsCollectorsOverheadTooHigh++; + } + } + } + + if (collector == null) { // not able to build an OrdinalsCollector + stringHashingCollectorsUsed++; + collector = new DirectCollector(counts, MurmurHash3Values.hash(valuesSource.bytesValues(ctx))); + } + + if (canPrune(parent, subAggregators, valuesSourceConfig)) { + Terms terms = ctx.reader().terms(valuesSourceConfig.fieldContext().field()); + if (terms == null) return collector; + if (exceedMaxThreshold(terms)) { + return collector; + } + + Collector pruningCollector = tryWrapWithPruningCollector(collector, terms, ctx); + if (pruningCollector == null) { + return collector; } - final long ordinalsMemoryUsage = OrdinalsCollector.memoryOverhead(maxOrd); - final long countsMemoryUsage = HyperLogLogPlusPlus.memoryUsage(precision); - // only use ordinals if they don't increase memory usage by more than 25% - if (ordinalsMemoryUsage < countsMemoryUsage / 4) { - ordinalsCollectorsUsed++; - return new OrdinalsCollector(counts, ordinalValues, context.bigArrays()); + if (!tryScoreWithPruningCollector(ctx, pruningCollector)) { + return collector; + } + logger.debug("Dynamic pruned segment {} of shard {}", ctx.ord, context.indexShard().shardId()); + dynamicPrunedSegments++; + + return getNoOpCollector(); + } + + return collector; + } + + private boolean canPrune(Aggregator parent, Aggregator[] subAggregators, ValuesSourceConfig valuesSourceConfig) { + return parent == null && subAggregators.length == 0 && valuesSourceConfig.missing() == null && valuesSourceConfig.script() == null; + } + + private boolean exceedMaxThreshold(Terms terms) throws IOException { + if (terms.size() > context.cardinalityAggregationPruningThreshold()) { + logger.debug( + "Cannot prune because terms size {} is greater than the threshold {}", + terms.size(), + context.cardinalityAggregationPruningThreshold() + ); + return true; + } + return false; + } + + private Collector tryWrapWithPruningCollector(Collector collector, Terms terms, LeafReaderContext ctx) { + try { + return new PruningCollector(collector, terms.iterator(), ctx, context, valuesSourceConfig.fieldContext().field()); + } catch (Exception e) { + logger.warn("Failed to build collector for dynamic pruning.", e); + return null; + } + } + + private boolean tryScoreWithPruningCollector(LeafReaderContext ctx, Collector pruningCollector) throws IOException { + try { + Weight weight = 
context.query().rewrite(context.searcher()).createWeight(context.searcher(), ScoreMode.TOP_DOCS, 1f); + BulkScorer scorer = weight.bulkScorer(ctx); + if (scorer == null) { + return false; } - ordinalsCollectorsOverheadTooHigh++; + Bits liveDocs = ctx.reader().getLiveDocs(); + scorer.score(pruningCollector, liveDocs); + pruningCollector.postCollect(); + Releasables.close(pruningCollector); + } catch (Exception e) { + throw new OpenSearchStatusException( + "Failed when performing dynamic pruning in cardinality aggregation. You can set cluster setting [" + + CARDINALITY_AGGREGATION_PRUNING_THRESHOLD.getKey() + + "] to 0 to disable.", + RestStatus.INTERNAL_SERVER_ERROR, + e + ); } + return true; + } - stringHashingCollectorsUsed++; - return new DirectCollector(counts, MurmurHash3Values.hash(valuesSource.bytesValues(ctx))); + private Collector getNoOpCollector() { + return new Collector() { + @Override + public void close() {} + + @Override + public void postCollect() throws IOException {} + + @Override + public void collect(int doc, long owningBucketOrd) throws IOException { + throw new CollectionTerminatedException(); + } + }; } @Override @@ -175,7 +290,7 @@ public InternalAggregation buildAggregation(long owningBucketOrdinal) { if (counts == null || owningBucketOrdinal >= counts.maxOrd() || counts.cardinality(owningBucketOrdinal) == 0) { return buildEmptyAggregation(); } - // We need to build a copy because the returned Aggregation needs remain usable after + // We need to build a copy because the returned Aggregation needs to remain usable after this Aggregator (and its HLL++ counters) is released. AbstractHyperLogLogPlusPlus copy = counts.clone(owningBucketOrdinal, BigArrays.NON_RECYCLING_INSTANCE); return new InternalCardinality(name, copy, metadata()); @@ -199,6 +314,7 @@ public void collectDebugInfo(BiConsumer add) { add.accept("ordinals_collectors_used", ordinalsCollectorsUsed); add.accept("ordinals_collectors_overhead_too_high", ordinalsCollectorsOverheadTooHigh); add.accept("string_hashing_collectors_used", stringHashingCollectorsUsed); + add.accept("dynamic_pruned_segments", dynamicPrunedSegments); } /** @@ -212,6 +328,130 @@ private abstract static class Collector extends LeafBucketCollector implements R } + /** + * This collector enhances the delegate collector with the ability to prune on a term field. + * The iterators over the term field's values are wrapped in a priority queue so that values + * can be popped/pruned once they have been collected. + */ + private static class PruningCollector extends Collector { + + private final Collector delegate; + private final DisiPriorityQueue queue; + private final DocIdSetIterator competitiveIterator; + + PruningCollector(Collector delegate, TermsEnum terms, LeafReaderContext ctx, SearchContext context, String field) + throws IOException { + this.delegate = delegate; + + Map postingMap = new HashMap<>(); + while (terms.next() != null) { + BytesRef term = terms.term(); + TermQuery termQuery = new TermQuery(new Term(field, term)); + Weight subWeight = termQuery.createWeight(context.searcher(), ScoreMode.COMPLETE_NO_SCORES, 1f); + Scorer scorer = subWeight.scorer(ctx); + postingMap.put(term, scorer); + } + + this.queue = new DisiPriorityQueue(postingMap.size()); + for (Scorer scorer : postingMap.values()) { + queue.add(new DisiWrapper(scorer)); + } + + competitiveIterator = new DisjunctionDISI(queue); + } + + @Override + public void close() { + delegate.close(); + } + + @Override + public void collect(int doc, long owningBucketOrd) throws IOException { +
delegate.collect(doc, owningBucketOrd); + prune(doc); + } + + /** + * Note: the queue may be empty or the queue top may be null after pruning + */ + private void prune(int doc) { + DisiWrapper top = queue.top(); + int curTopDoc = top.doc; + if (curTopDoc == doc) { + do { + queue.pop(); + top = queue.updateTop(); + } while (queue.size() > 1 && top.doc == curTopDoc); } + } + + @Override + public DocIdSetIterator competitiveIterator() { + return competitiveIterator; + } + + @Override + public void postCollect() throws IOException { + delegate.postCollect(); + } + } + + /** + * This DISI is a disjunction over all terms in a segment, and it serves as the competitive + * iterator of the leaf pruning collector. After pruning inside collect, the queue's top doc + * may move past the next doc of the (lead) iterator. To still report a docID no further along + * than the lead iterator on the next iteration, we keep track of a slowDocId that is updated + * later, during advance. + */ + private static class DisjunctionDISI extends DocIdSetIterator { + private final DisiPriorityQueue queue; + private int slowDocId = -1; + + public DisjunctionDISI(DisiPriorityQueue queue) { + this.queue = queue; + } + + @Override + public int docID() { + return slowDocId; + } + + @Override + public int advance(int target) throws IOException { + DisiWrapper top = queue.top(); + if (top == null) { + return slowDocId = NO_MORE_DOCS; + } + + // This would be the outcome of the last pruning: + // this DISI's docID has already reached the target + if (top.doc >= target) { + slowDocId = top.doc; + return top.doc; + } + + do { + top.doc = top.approximation.advance(target); + top = queue.updateTop(); + } while (top.doc < target); + slowDocId = queue.size() == 0 ? NO_MORE_DOCS : queue.top().doc; + + return slowDocId; + } + + @Override + public int nextDoc() { + // don't expect this to be called based on its usage in DefaultBulkScorer + throw new UnsupportedOperationException(); + } + + @Override + public long cost() { + // don't expect this to be called based on its usage in DefaultBulkScorer + throw new UnsupportedOperationException(); + } + } + /** * Empty Collector for the Cardinality agg * diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/InternalCardinality.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/InternalCardinality.java index 7e9511ffdd379..9f9ad63220fea 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/InternalCardinality.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/InternalCardinality.java @@ -117,7 +117,6 @@ public InternalAggregation reduce(List aggregations, Reduce return aggregations.get(0); } else { return new InternalCardinality(name, reduced, getMetadata()); - } } diff --git a/server/src/main/java/org/opensearch/search/internal/SearchContext.java b/server/src/main/java/org/opensearch/search/internal/SearchContext.java index 0c8240d3a8322..bc4b7058651dd 100644 --- a/server/src/main/java/org/opensearch/search/internal/SearchContext.java +++ b/server/src/main/java/org/opensearch/search/internal/SearchContext.java @@ -522,4 +522,8 @@ public String toString() { public int maxAggRewriteFilters() { return 0; } + + public int cardinalityAggregationPruningThreshold() { + return 0; + } } diff --git a/server/src/main/java/org/opensearch/search/query/QueryPhase.java b/server/src/main/java/org/opensearch/search/query/QueryPhase.java index 608649ad22b23..55b7c0bc5178d 100700 ---
a/server/src/main/java/org/opensearch/search/query/QueryPhase.java +++ b/server/src/main/java/org/opensearch/search/query/QueryPhase.java @@ -354,6 +354,9 @@ private static boolean searchWithCollector( try { searcher.search(query, queryCollector); } catch (EarlyTerminatingCollector.EarlyTerminationException e) { + // EarlyTerminationException is not caught in ContextIndexSearcher to allow force termination of collection. Postcollection + // still needs to be processed for Aggregations when early termination takes place. + searchContext.bucketCollectorProcessor().processPostCollection(queryCollector); queryResult.terminatedEarly(true); } if (searchContext.isSearchTimedOut()) { diff --git a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java index 0b9026b81eb4e..056ef0fac0153 100644 --- a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java +++ b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java @@ -115,6 +115,7 @@ public static class Names { public static final String REMOTE_PURGE = "remote_purge"; public static final String REMOTE_REFRESH_RETRY = "remote_refresh_retry"; public static final String REMOTE_RECOVERY = "remote_recovery"; + public static final String REMOTE_STATE_READ = "remote_state_read"; public static final String INDEX_SEARCHER = "index_searcher"; } @@ -186,6 +187,7 @@ public static ThreadPoolType fromType(String type) { map.put(Names.REMOTE_PURGE, ThreadPoolType.SCALING); map.put(Names.REMOTE_REFRESH_RETRY, ThreadPoolType.SCALING); map.put(Names.REMOTE_RECOVERY, ThreadPoolType.SCALING); + map.put(Names.REMOTE_STATE_READ, ThreadPoolType.SCALING); map.put(Names.INDEX_SEARCHER, ThreadPoolType.RESIZABLE); THREAD_POOL_TYPES = Collections.unmodifiableMap(map); } @@ -279,6 +281,15 @@ public ThreadPool( TimeValue.timeValueMinutes(5) ) ); + builders.put( + Names.REMOTE_STATE_READ, + new ScalingExecutorBuilder( + Names.REMOTE_STATE_READ, + 1, + twiceAllocatedProcessors(allocatedProcessors), + TimeValue.timeValueMinutes(5) + ) + ); builders.put( Names.INDEX_SEARCHER, new ResizableExecutorBuilder( diff --git a/server/src/test/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceFactoryTests.java b/server/src/test/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceFactoryTests.java index d0c2cca4b46f0..39294ee8da41e 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceFactoryTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceFactoryTests.java @@ -14,6 +14,9 @@ import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.fs.FsRepository; import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.junit.After; import java.util.function.Supplier; @@ -23,13 +26,21 @@ public class RemoteRoutingTableServiceFactoryTests extends OpenSearchTestCase { Supplier repositoriesService; + private ThreadPool threadPool = new TestThreadPool(getClass().getName()); + + @After + public void teardown() throws Exception { + super.tearDown(); + threadPool.shutdown(); + } public void testGetServiceWhenRemoteRoutingDisabled() { Settings settings = Settings.builder().build(); RemoteRoutingTableService service = RemoteRoutingTableServiceFactory.getService( repositoriesService, settings, - new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + new 
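For context on the `remote_state_read` pool added above: it is a scaling executor sized from one thread up to twice the allocated processors with a five-minute keep-alive, mirroring the other remote-store pools. A sketch of how a caller would dispatch onto it (the runnable body is illustrative):

```java
import org.opensearch.threadpool.ThreadPool;

class RemoteStateReadSketch {
    // Offload a remote cluster-state blob read so it does not block the calling thread.
    static void readAsync(ThreadPool threadPool, Runnable readBlobAndNotifyListener) {
        threadPool.executor(ThreadPool.Names.REMOTE_STATE_READ).execute(readBlobAndNotifyListener);
    }
}
```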
ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + threadPool ); assertTrue(service instanceof NoopRemoteRoutingTableService); } @@ -44,8 +55,10 @@ public void testGetServiceWhenRemoteRoutingEnabled() { RemoteRoutingTableService service = RemoteRoutingTableServiceFactory.getService( repositoriesService, settings, - new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + threadPool ); assertTrue(service instanceof InternalRemoteRoutingTableService); } + } diff --git a/server/src/test/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceTests.java b/server/src/test/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceTests.java index 8fd410e774332..839ebe1ff8301 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceTests.java @@ -18,6 +18,7 @@ import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.routing.IndexRoutingTable; import org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.CheckedRunnable; import org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer; import org.opensearch.common.blobstore.BlobContainer; @@ -26,6 +27,7 @@ import org.opensearch.common.blobstore.stream.write.WriteContext; import org.opensearch.common.blobstore.stream.write.WritePriority; import org.opensearch.common.compress.DeflateCompressor; +import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.FeatureFlags; @@ -33,7 +35,8 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.index.Index; import org.opensearch.gateway.remote.ClusterMetadataManifest; -import org.opensearch.gateway.remote.RemoteClusterStateService; +import org.opensearch.gateway.remote.RemoteStateTransferException; +import org.opensearch.gateway.remote.routingtable.RemoteIndexRoutingTable; import org.opensearch.index.remote.RemoteStoreEnums; import org.opensearch.index.remote.RemoteStorePathStrategy; import org.opensearch.index.remote.RemoteStoreUtils; @@ -41,30 +44,37 @@ import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.RepositoryMissingException; import org.opensearch.repositories.blobstore.BlobStoreRepository; -import org.opensearch.repositories.fs.FsRepository; import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; import org.junit.After; import org.junit.Before; import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.function.Supplier; import org.mockito.ArgumentCaptor; +import org.mockito.Mockito; import static org.opensearch.cluster.routing.remote.InternalRemoteRoutingTableService.INDEX_ROUTING_FILE_PREFIX; import static org.opensearch.cluster.routing.remote.InternalRemoteRoutingTableService.INDEX_ROUTING_PATH_TOKEN; import static org.opensearch.common.util.FeatureFlags.REMOTE_PUBLICATION_EXPERIMENTAL; -import static org.opensearch.gateway.remote.RemoteClusterStateService.DELIMITER; +import static 
org.opensearch.gateway.remote.ClusterMetadataManifestTests.randomUploadedIndexMetadataList; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.DELIMITER; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY; -import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.ArgumentMatchers.startsWith; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; @@ -80,6 +90,9 @@ public class RemoteRoutingTableServiceTests extends OpenSearchTestCase { private BlobStore blobStore; private BlobContainer blobContainer; private BlobPath basePath; + private ClusterSettings clusterSettings; + private ClusterService clusterService; + private final ThreadPool threadPool = new TestThreadPool(getClass().getName()); @Before public void setup() { @@ -89,16 +102,16 @@ public void setup() { Settings settings = Settings.builder() .put("node.attr." + REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY, "routing_repository") - .put(FsRepository.REPOSITORIES_COMPRESS_SETTING.getKey(), false) .build(); - + clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + clusterService = mock(ClusterService.class); + when(clusterService.getClusterSettings()).thenReturn(clusterSettings); blobStoreRepository = mock(BlobStoreRepository.class); when(blobStoreRepository.getCompressor()).thenReturn(new DeflateCompressor()); blobStore = mock(BlobStore.class); blobContainer = mock(BlobContainer.class); when(repositoriesService.repository("routing_repository")).thenReturn(blobStoreRepository); when(blobStoreRepository.blobStore()).thenReturn(blobStore); - Settings nodeSettings = Settings.builder().put(REMOTE_PUBLICATION_EXPERIMENTAL, "true").build(); FeatureFlags.initializeFeatureFlags(nodeSettings); @@ -107,14 +120,17 @@ public void setup() { remoteRoutingTableService = new InternalRemoteRoutingTableService( repositoriesServiceSupplier, settings, - new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + threadPool ); + } @After public void teardown() throws Exception { super.tearDown(); - remoteRoutingTableService.close(); + remoteRoutingTableService.doClose(); + threadPool.shutdown(); } public void testFailInitializationWhenRemoteRoutingDisabled() { @@ -124,7 +140,8 @@ public void testFailInitializationWhenRemoteRoutingDisabled() { () -> new InternalRemoteRoutingTableService( repositoriesServiceSupplier, settings, - new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + threadPool ) ); } @@ -170,6 +187,39 @@ public void testGetIndicesRoutingMapDiff() { assertEquals(0, diff.getDeletes().size()); } + public void testGetChangedIndicesRouting() { + String indexName = randomAlphaOfLength(randomIntBetween(1, 50)); + final Index index = new Index(indexName, "uuid"); + final IndexMetadata indexMetadata = new IndexMetadata.Builder(indexName).settings( + Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + 
.put(IndexMetadata.SETTING_INDEX_UUID, "uuid") + .build() + ).numberOfShards(1).numberOfReplicas(1).build(); + + RoutingTable routingTable = RoutingTable.builder().addAsNew(indexMetadata).build(); + ClusterState state = ClusterState.builder(ClusterName.DEFAULT).routingTable(routingTable).build(); + + assertEquals( + 0, + remoteRoutingTableService.getIndicesRoutingMapDiff(state.getRoutingTable(), state.getRoutingTable()).getUpserts().size() + ); + + // Reversing order to check for equality without order. + IndexRoutingTable indexRouting = routingTable.getIndicesRouting().get(indexName); + IndexRoutingTable indexRoutingTable = IndexRoutingTable.builder(index) + .addShard(indexRouting.getShards().get(0).replicaShards().get(0)) + .addShard(indexRouting.getShards().get(0).primaryShard()) + .build(); + ClusterState newState = ClusterState.builder(ClusterName.DEFAULT) + .routingTable(RoutingTable.builder().add(indexRoutingTable).build()) + .build(); + assertEquals( + 0, + remoteRoutingTableService.getIndicesRoutingMapDiff(state.getRoutingTable(), newState.getRoutingTable()).getUpserts().size() + ); + } + public void testGetIndicesRoutingMapDiffIndexAdded() { String indexName = randomAlphaOfLength(randomIntBetween(1, 50)); final IndexMetadata indexMetadata = new IndexMetadata.Builder(indexName).settings( @@ -177,11 +227,11 @@ public void testGetIndicesRoutingMapDiffIndexAdded() { .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetadata.SETTING_INDEX_UUID, "uuid") .build() - ).numberOfShards(randomInt(1000)).numberOfReplicas(randomInt(10)).build(); + ).numberOfShards(between(1, 1000)).numberOfReplicas(randomInt(10)).build(); RoutingTable routingTable = RoutingTable.builder().addAsNew(indexMetadata).build(); String indexName2 = randomAlphaOfLength(randomIntBetween(1, 50)); - int noOfShards = randomInt(1000); + int noOfShards = between(1, 1000); int noOfReplicas = randomInt(10); final IndexMetadata indexMetadata2 = new IndexMetadata.Builder(indexName2).settings( Settings.builder() @@ -202,8 +252,7 @@ public void testGetIndicesRoutingMapDiffIndexAdded() { public void testGetIndicesRoutingMapDiffShardChanged() { String indexName = randomAlphaOfLength(randomIntBetween(1, 50)); - final Index index = new Index(indexName, "uuid"); - int noOfShards = randomInt(1000); + int noOfShards = between(1, 1000); int noOfReplicas = randomInt(10); final IndexMetadata indexMetadata = new IndexMetadata.Builder(indexName).settings( Settings.builder() @@ -249,8 +298,7 @@ public void testGetIndicesRoutingMapDiffShardChanged() { public void testGetIndicesRoutingMapDiffShardDetailChanged() { String indexName = randomAlphaOfLength(randomIntBetween(1, 50)); - final Index index = new Index(indexName, "uuid"); - int noOfShards = randomInt(1000); + int noOfShards = between(1, 1000); int noOfReplicas = randomInt(10); final IndexMetadata indexMetadata = new IndexMetadata.Builder(indexName).settings( Settings.builder() @@ -278,7 +326,7 @@ public void testGetIndicesRoutingMapDiffIndexDeleted() { .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetadata.SETTING_INDEX_UUID, "uuid") .build() - ).numberOfShards(randomInt(1000)).numberOfReplicas(randomInt(10)).build(); + ).numberOfShards(between(1, 1000)).numberOfReplicas(randomInt(10)).build(); RoutingTable routingTable = RoutingTable.builder().addAsNew(indexMetadata).build(); String indexName2 = randomAlphaOfLength(randomIntBetween(1, 50)); @@ -287,7 +335,7 @@ public void testGetIndicesRoutingMapDiffIndexDeleted() { 
.put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetadata.SETTING_INDEX_UUID, "uuid2") .build() - ).numberOfShards(randomInt(1000)).numberOfReplicas(randomInt(10)).build(); + ).numberOfShards(between(1, 1000)).numberOfReplicas(randomInt(10)).build(); RoutingTable routingTable2 = RoutingTable.builder().addAsNew(indexMetadata2).build(); DiffableUtils.MapDiff> diff = remoteRoutingTableService @@ -352,7 +400,7 @@ public void testGetIndexRoutingAsyncActionFailureInBlobRepo() throws IOException RemoteStoreUtils.invertLong(clusterState.version()) ); verify(blobContainer, times(1)).writeBlob(startsWith(expectedFilePrefix), any(StreamInput.class), anyLong(), eq(true)); - verify(listener, times(1)).onFailure(any(RemoteClusterStateService.RemoteStateTransferException.class)); + verify(listener, times(1)).onFailure(any(RemoteStateTransferException.class)); } public void testGetIndexRoutingAsyncActionAsyncRepo() throws IOException { @@ -419,7 +467,7 @@ public void testGetIndexRoutingAsyncActionAsyncRepoFailureInRepo() throws IOExce ); assertNotNull(runnable); runnable.run(); - verify(listener, times(1)).onFailure(any(RemoteClusterStateService.RemoteStateTransferException.class)); + verify(listener, times(1)).onFailure(any(RemoteStateTransferException.class)); } public void testGetAllUploadedIndicesRouting() { @@ -530,13 +578,177 @@ public void testGetAllUploadedIndicesRoutingNoChange() { assertEquals(uploadedIndexMetadata2, allIndiceRoutingMetadata.get(1)); } + public void testIndicesRoutingDiffWhenIndexDeleted() { + + ClusterState state = createIndices(randomIntBetween(1, 100)); + RoutingTable routingTable = state.routingTable(); + + List allIndices = new ArrayList<>(); + routingTable.getIndicesRouting().forEach((k, v) -> allIndices.add(k)); + + String indexNameToDelete = allIndices.get(randomIntBetween(0, allIndices.size() - 1)); + RoutingTable updatedRoutingTable = RoutingTable.builder(routingTable).remove(indexNameToDelete).build(); + + assertEquals( + 1, + remoteRoutingTableService.getIndicesRoutingMapDiff(state.getRoutingTable(), updatedRoutingTable).getDeletes().size() + ); + assertEquals( + indexNameToDelete, + remoteRoutingTableService.getIndicesRoutingMapDiff(state.getRoutingTable(), updatedRoutingTable).getDeletes().get(0) + ); + } + + public void testIndicesRoutingDiffWhenIndexDeletedAndAdded() { + + ClusterState state = createIndices(randomIntBetween(1, 100)); + RoutingTable routingTable = state.routingTable(); + + List allIndices = new ArrayList<>(); + routingTable.getIndicesRouting().forEach((k, v) -> allIndices.add(k)); + + String indexNameToDelete = allIndices.get(randomIntBetween(0, allIndices.size() - 1)); + RoutingTable.Builder updatedRoutingTableBuilder = RoutingTable.builder(routingTable).remove(indexNameToDelete); + + String indexName = randomAlphaOfLength(randomIntBetween(1, 50)); + final IndexMetadata indexMetadata = new IndexMetadata.Builder(indexName).settings( + Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_INDEX_UUID, "uuid") + .build() + ).numberOfShards(1).numberOfReplicas(1).build(); + + RoutingTable updatedRoutingTable = updatedRoutingTableBuilder.addAsNew(indexMetadata).build(); + + assertEquals( + 1, + remoteRoutingTableService.getIndicesRoutingMapDiff(state.getRoutingTable(), updatedRoutingTable).getDeletes().size() + ); + assertEquals( + indexNameToDelete, + remoteRoutingTableService.getIndicesRoutingMapDiff(state.getRoutingTable(), updatedRoutingTable).getDeletes().get(0) + ); + 
+ assertEquals( + 1, + remoteRoutingTableService.getIndicesRoutingMapDiff(state.getRoutingTable(), updatedRoutingTable).getUpserts().size() + ); + assertTrue( + remoteRoutingTableService.getIndicesRoutingMapDiff(state.getRoutingTable(), updatedRoutingTable) + .getUpserts() + .containsKey(indexName) + ); + } + + public void testGetAsyncIndexMetadataReadAction() throws Exception { + String indexName = randomAlphaOfLength(randomIntBetween(1, 50)); + ClusterState clusterState = createClusterState(indexName); + String uploadedFileName = String.format(Locale.ROOT, "index-routing/" + indexName); + Index index = new Index(indexName, "uuid-01"); + + LatchedActionListener listener = mock(LatchedActionListener.class); + when(blobStore.blobContainer(any())).thenReturn(blobContainer); + BytesStreamOutput streamOutput = new BytesStreamOutput(); + RemoteIndexRoutingTable remoteIndexRoutingTable = new RemoteIndexRoutingTable( + clusterState.routingTable().getIndicesRouting().get(indexName) + ); + remoteIndexRoutingTable.writeTo(streamOutput); + when(blobContainer.readBlob(indexName)).thenReturn(streamOutput.bytes().streamInput()); + remoteRoutingTableService.start(); + + CheckedRunnable runnable = remoteRoutingTableService.getAsyncIndexRoutingReadAction(uploadedFileName, index, listener); + assertNotNull(runnable); + runnable.run(); + + assertBusy(() -> verify(blobContainer, times(1)).readBlob(any())); + assertBusy(() -> verify(listener, times(1)).onResponse(any(IndexRoutingTable.class))); + } + + public void testGetAsyncIndexMetadataReadActionFailureForIncorrectIndex() throws Exception { + String indexName = randomAlphaOfLength(randomIntBetween(1, 50)); + ClusterState clusterState = createClusterState(indexName); + String uploadedFileName = String.format(Locale.ROOT, "index-routing/" + indexName); + Index index = new Index("incorrect-index", "uuid-01"); + + LatchedActionListener listener = mock(LatchedActionListener.class); + when(blobStore.blobContainer(any())).thenReturn(blobContainer); + BytesStreamOutput streamOutput = new BytesStreamOutput(); + RemoteIndexRoutingTable remoteIndexRoutingTable = new RemoteIndexRoutingTable( + clusterState.routingTable().getIndicesRouting().get(indexName) + ); + remoteIndexRoutingTable.writeTo(streamOutput); + when(blobContainer.readBlob(anyString())).thenReturn(streamOutput.bytes().streamInput()); + remoteRoutingTableService.doStart(); + + CheckedRunnable runnable = remoteRoutingTableService.getAsyncIndexRoutingReadAction(uploadedFileName, index, listener); + assertNotNull(runnable); + runnable.run(); + + assertBusy(() -> verify(blobContainer, times(1)).readBlob(any())); + assertBusy(() -> verify(listener, times(1)).onFailure(any(Exception.class))); + } + + public void testGetAsyncIndexMetadataReadActionFailureInBlobRepo() throws Exception { + String indexName = randomAlphaOfLength(randomIntBetween(1, 50)); + String uploadedFileName = String.format(Locale.ROOT, "index-routing/" + indexName); + Index index = new Index(indexName, "uuid-01"); + + LatchedActionListener listener = mock(LatchedActionListener.class); + when(blobStore.blobContainer(any())).thenReturn(blobContainer); + doThrow(new IOException("testing failure")).when(blobContainer).readBlob(indexName); + remoteRoutingTableService.doStart(); + + CheckedRunnable runnable = remoteRoutingTableService.getAsyncIndexRoutingReadAction(uploadedFileName, index, listener); + assertNotNull(runnable); + runnable.run(); + + assertBusy(() -> verify(listener, times(1)).onFailure(any(RemoteStateTransferException.class))); + } 
+ + public void testGetUpdatedIndexRoutingTableMetadataWhenNoChange() { + List updatedIndicesRouting = new ArrayList<>(); + List indicesRouting = randomUploadedIndexMetadataList(); + List updatedIndexMetadata = remoteRoutingTableService + .getUpdatedIndexRoutingTableMetadata(updatedIndicesRouting, indicesRouting); + assertEquals(0, updatedIndexMetadata.size()); + } + + public void testGetUpdatedIndexRoutingTableMetadataWhenIndexIsUpdated() { + List updatedIndicesRouting = new ArrayList<>(); + List indicesRouting = randomUploadedIndexMetadataList(); + ClusterMetadataManifest.UploadedIndexMetadata expectedIndexRouting = indicesRouting.get( + randomIntBetween(0, indicesRouting.size() - 1) + ); + updatedIndicesRouting.add(expectedIndexRouting.getIndexName()); + List updatedIndexMetadata = remoteRoutingTableService + .getUpdatedIndexRoutingTableMetadata(updatedIndicesRouting, indicesRouting); + assertEquals(1, updatedIndexMetadata.size()); + assertEquals(expectedIndexRouting, updatedIndexMetadata.get(0)); + } + + private ClusterState createIndices(int numberOfIndices) { + RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); + for (int i = 0; i < numberOfIndices; i++) { + String indexName = randomAlphaOfLength(randomIntBetween(1, 50)); + final IndexMetadata indexMetadata = new IndexMetadata.Builder(indexName).settings( + Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_INDEX_UUID, "uuid") + .build() + ).numberOfShards(1).numberOfReplicas(1).build(); + + routingTableBuilder.addAsNew(indexMetadata); + } + return ClusterState.builder(ClusterName.DEFAULT).routingTable(routingTableBuilder.build()).build(); + } + private ClusterState createClusterState(String indexName) { final IndexMetadata indexMetadata = new IndexMetadata.Builder(indexName).settings( Settings.builder() .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetadata.SETTING_INDEX_UUID, "uuid") .build() - ).numberOfShards(randomInt(1000)).numberOfReplicas(randomInt(10)).build(); + ).numberOfShards(between(1, 1000)).numberOfReplicas(randomInt(10)).build(); RoutingTable routingTable = RoutingTable.builder().addAsNew(indexMetadata).build(); return ClusterState.builder(ClusterName.DEFAULT) .routingTable(routingTable) @@ -552,4 +764,28 @@ private BlobPath getPath() { RemoteStoreEnums.PathHashAlgorithm.FNV_1A_BASE64 ); } + + public void testDeleteStaleIndexRoutingPaths() throws IOException { + doNothing().when(blobContainer).deleteBlobsIgnoringIfNotExists(any()); + when(blobStore.blobContainer(any())).thenReturn(blobContainer); + List stalePaths = Arrays.asList("path1", "path2"); + remoteRoutingTableService.doStart(); + remoteRoutingTableService.deleteStaleIndexRoutingPaths(stalePaths); + verify(blobContainer).deleteBlobsIgnoringIfNotExists(stalePaths); + } + + public void testDeleteStaleIndexRoutingPathsThrowsIOException() throws IOException { + when(blobStore.blobContainer(any())).thenReturn(blobContainer); + List stalePaths = Arrays.asList("path1", "path2"); + // Simulate an IOException + doThrow(new IOException("test exception")).when(blobContainer).deleteBlobsIgnoringIfNotExists(Mockito.anyList()); + + remoteRoutingTableService.doStart(); + IOException thrown = assertThrows(IOException.class, () -> { + remoteRoutingTableService.deleteStaleIndexRoutingPaths(stalePaths); + }); + assertEquals("test exception", thrown.getMessage()); + verify(blobContainer).deleteBlobsIgnoringIfNotExists(stalePaths); + } + } diff --git 
a/server/src/test/java/org/opensearch/common/hash/MessageDigestsTests.java b/server/src/test/java/org/opensearch/common/hash/MessageDigestsTests.java index 9e793e5487eb8..6b7cfb4c8932c 100644 --- a/server/src/test/java/org/opensearch/common/hash/MessageDigestsTests.java +++ b/server/src/test/java/org/opensearch/common/hash/MessageDigestsTests.java @@ -91,6 +91,31 @@ public void testSha256() throws Exception { ); } + public void testSha3256() throws Exception { + assertHash("a7ffc6f8bf1ed76651c14756a061d662f580ff4de43b49fa82d80a4b80f8434a", "", MessageDigests.sha3256()); + assertHash("3a985da74fe225b2045c172d6bd390bd855f086e3e9d525b46bfe24511431532", "abc", MessageDigests.sha3256()); + assertHash( + "41c0dba2a9d6240849100376a8235e2c82e1b9998a999e21db32dd97496d3376", + "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq", + MessageDigests.sha3256() + ); + assertHash( + "5c8875ae474a3634ba4fd55ec85bffd661f32aca75c6d699d0cdcb6c115891c1", + new String(new char[1000000]).replace("\0", "a"), + MessageDigests.sha3256() + ); + assertHash( + "69070dda01975c8c120c3aada1b282394e7f032fa9cf32f4cb2259a0897dfc04", + "The quick brown fox jumps over the lazy dog", + MessageDigests.sha3256() + ); + assertHash( + "cc80b0b13ba89613d93f02ee7ccbe72ee26c6edfe577f22e63a1380221caedbc", + "The quick brown fox jumps over the lazy cog", + MessageDigests.sha3256() + ); + } + public void testToHexString() throws Exception { BigInteger expected = BigInteger.probablePrime(256, random()); byte[] bytes = expected.toByteArray(); diff --git a/server/src/test/java/org/opensearch/common/joda/JavaJodaTimeDuellingTests.java b/server/src/test/java/org/opensearch/common/joda/JavaJodaTimeDuellingTests.java index 4fd8986d0b428..28edace8433e6 100644 --- a/server/src/test/java/org/opensearch/common/joda/JavaJodaTimeDuellingTests.java +++ b/server/src/test/java/org/opensearch/common/joda/JavaJodaTimeDuellingTests.java @@ -51,6 +51,7 @@ import java.util.Locale; import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.startsWith; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.is; @@ -186,7 +187,7 @@ public void testIncompatiblePatterns() { .parseDateTime("2019-01-01T01:01:01.001+0000"); String jodaZoneId = DateTimeFormat.forPattern("YYYY-MM-dd'T'HH:mm:ss.SSSz").print(dateTime); assertThat(javaZoneId, equalTo("2019-01-01T01:01:01.001Z")); - assertThat(jodaZoneId, equalTo("2019-01-01T01:01:01.001UTC")); + assertThat(jodaZoneId, startsWith("2019-01-01T01:01:01.001")); } private void assertSameMillis(String input, String jodaFormat, String javaFormat) { diff --git a/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java b/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java index 7b113961fc2c7..8e8d80c870ddf 100644 --- a/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java +++ b/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java @@ -489,7 +489,8 @@ public void testDataOnlyNodePersistence() throws Exception { clusterService, () -> 0L, threadPool, - List.of(new RemoteIndexPathUploader(threadPool, settings, repositoriesServiceSupplier, clusterSettings)) + List.of(new RemoteIndexPathUploader(threadPool, settings, repositoriesServiceSupplier, clusterSettings)), + writableRegistry() ); } else { return null; diff --git a/server/src/test/java/org/opensearch/gateway/PrimaryShardBatchAllocatorTests.java 
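The test vectors above exercise a new SHA3-256 helper; based on the names used in this test file (`MessageDigests.sha3256()` here, `MessageDigests.toHexString` in `testToHexString`), usage would look roughly like this:

```java
import org.opensearch.common.hash.MessageDigests;

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;

class Sha3Sketch {
    static String sha3Hex(String input) {
        MessageDigest digest = MessageDigests.sha3256();
        byte[] hash = digest.digest(input.getBytes(StandardCharsets.UTF_8));
        // For "abc" this yields the vector asserted above:
        // 3a985da74fe225b2045c172d6bd390bd855f086e3e9d525b46bfe24511431532
        return MessageDigests.toHexString(hash);
    }
}
```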
b/server/src/test/java/org/opensearch/gateway/PrimaryShardBatchAllocatorTests.java index 522ad2a64ea5d..e90850de3fe33 100644 --- a/server/src/test/java/org/opensearch/gateway/PrimaryShardBatchAllocatorTests.java +++ b/server/src/test/java/org/opensearch/gateway/PrimaryShardBatchAllocatorTests.java @@ -9,6 +9,7 @@ import org.apache.lucene.codecs.Codec; import org.opensearch.Version; +import org.opensearch.cluster.ClusterInfo; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.OpenSearchAllocationTestCase; @@ -19,12 +20,15 @@ import org.opensearch.cluster.routing.RoutingNodes; import org.opensearch.cluster.routing.RoutingTable; import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.cluster.routing.UnassignedInfo; import org.opensearch.cluster.routing.allocation.AllocateUnassignedDecision; import org.opensearch.cluster.routing.allocation.AllocationDecision; import org.opensearch.cluster.routing.allocation.RoutingAllocation; import org.opensearch.cluster.routing.allocation.decider.AllocationDeciders; +import org.opensearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; import org.opensearch.common.Nullable; +import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.set.Sets; import org.opensearch.core.index.shard.ShardId; @@ -44,6 +48,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.stream.Collectors; import static org.opensearch.cluster.routing.UnassignedInfo.Reason.CLUSTER_RECOVERED; @@ -87,42 +92,28 @@ private void allocateAllUnassignedBatch(final RoutingAllocation allocation) { public void testMakeAllocationDecisionDataFetching() { final RoutingAllocation allocation = routingAllocationWithOnePrimary(noAllocationDeciders(), CLUSTER_RECOVERED, "allocId1"); - - List<ShardRouting> shards = new ArrayList<>(); - allocateAllUnassignedBatch(allocation); ShardRouting shard = allocation.routingTable().getIndicesRouting().get("test").shard(shardId.id()).primaryShard(); - shards.add(shard); - HashMap<ShardRouting, AllocateUnassignedDecision> allDecisions = batchAllocator.makeAllocationDecision(shards, allocation, logger); - // verify we get decisions for all the shards - assertEquals(shards.size(), allDecisions.size()); - assertEquals(shards, new ArrayList<>(allDecisions.keySet())); - assertEquals(AllocationDecision.AWAITING_INFO, allDecisions.get(shard).getAllocationDecision()); + AllocateUnassignedDecision allocateUnassignedDecision = batchAllocator.makeAllocationDecision(shard, allocation, logger); + assertEquals(AllocationDecision.AWAITING_INFO, allocateUnassignedDecision.getAllocationDecision()); } public void testMakeAllocationDecisionForReplicaShard() { final RoutingAllocation allocation = routingAllocationWithOnePrimary(noAllocationDeciders(), CLUSTER_RECOVERED, "allocId1"); List<ShardRouting> replicaShards = allocation.routingTable().getIndicesRouting().get("test").shard(shardId.id()).replicaShards(); - List<ShardRouting> shards = new ArrayList<>(replicaShards); - HashMap<ShardRouting, AllocateUnassignedDecision> allDecisions = batchAllocator.makeAllocationDecision(shards, allocation, logger); - // verify we get decisions for all the shards - assertEquals(shards.size(), allDecisions.size()); - assertEquals(shards, new ArrayList<>(allDecisions.keySet())); - assertFalse(allDecisions.get(replicaShards.get(0)).isDecisionTaken()); + for (ShardRouting shardRouting : replicaShards) { + AllocateUnassignedDecision allocateUnassignedDecision = batchAllocator.makeAllocationDecision(shardRouting, allocation, logger); + assertFalse(allocateUnassignedDecision.isDecisionTaken()); + } } public void testMakeAllocationDecisionDataFetched() { final RoutingAllocation allocation = routingAllocationWithOnePrimary(noAllocationDeciders(), CLUSTER_RECOVERED, "allocId1"); - List<ShardRouting> shards = new ArrayList<>(); ShardRouting shard = allocation.routingTable().getIndicesRouting().get("test").shard(shardId.id()).primaryShard(); - shards.add(shard); batchAllocator.addData(node1, "allocId1", true, new ReplicationCheckpoint(shardId, 20, 101, 1, Codec.getDefault().getName())); - HashMap<ShardRouting, AllocateUnassignedDecision> allDecisions = batchAllocator.makeAllocationDecision(shards, allocation, logger); - // verify we get decisions for all the shards - assertEquals(shards.size(), allDecisions.size()); - assertEquals(shards, new ArrayList<>(allDecisions.keySet())); - assertEquals(AllocationDecision.YES, allDecisions.get(shard).getAllocationDecision()); + AllocateUnassignedDecision allocateUnassignedDecision = batchAllocator.makeAllocationDecision(shard, allocation, logger); + assertEquals(AllocationDecision.YES, allocateUnassignedDecision.getAllocationDecision()); } public void testMakeAllocationDecisionDataFetchedMultipleShards() { @@ -149,13 +140,88 @@ public void testMakeAllocationDecisionDataFetchedMultipleShards() { null ); } - HashMap<ShardRouting, AllocateUnassignedDecision> allDecisions = batchAllocator.makeAllocationDecision(shards, allocation, logger); - // verify we get decisions for all the shards - assertEquals(shards.size(), allDecisions.size()); - assertEquals(new HashSet<>(shards), allDecisions.keySet()); - for (ShardRouting shard : shards) { - assertEquals(AllocationDecision.YES, allDecisions.get(shard).getAllocationDecision()); + for (ShardRouting shardRouting : shards) { + AllocateUnassignedDecision allocateUnassignedDecision = batchAllocator.makeAllocationDecision(shardRouting, allocation, logger); + assertEquals(AllocationDecision.YES, allocateUnassignedDecision.getAllocationDecision()); + } + } + + public void testInitializePrimaryShards() { + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + AllocationDeciders allocationDeciders = randomAllocationDeciders(Settings.builder().build(), clusterSettings, random()); + setUpShards(2); + final RoutingAllocation routingAllocation = routingAllocationWithMultiplePrimaries( + allocationDeciders, + CLUSTER_RECOVERED, + 2, + 0, + "allocId-0", + "allocId-1" + ); + + for (ShardId shardId : shardsInBatch) { + batchAllocator.addShardData( + node1, + "allocId-" + shardId.id(), + shardId, + true, + new ReplicationCheckpoint(shardId, 20, 101, 1, Codec.getDefault().getName()), + null + ); + } + + allocateAllUnassignedBatch(routingAllocation); + + assertEquals(0, routingAllocation.routingNodes().unassigned().size()); + List<ShardRouting> initializingShards = routingAllocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING); + assertEquals(2, initializingShards.size()); + assertTrue(shardsInBatch.contains(initializingShards.get(0).shardId())); + assertTrue(shardsInBatch.contains(initializingShards.get(1).shardId())); + assertEquals(2, routingAllocation.routingNodes().getInitialPrimariesIncomingRecoveries(node1.getId())); + } + + public void testAllocateUnassignedBatchThrottlingAllocationDeciderIsHonoured() { + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
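+ // Cap initial primary recoveries at one per node so that, with two primaries in the batch, the second allocation attempt must be throttled.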
+ AllocationDeciders allocationDeciders = randomAllocationDeciders( + Settings.builder() + .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING.getKey(), 1) + .build(), + clusterSettings, + random() + ); + setUpShards(2); + final RoutingAllocation routingAllocation = routingAllocationWithMultiplePrimaries( + allocationDeciders, + CLUSTER_RECOVERED, + 2, + 0, + "allocId-0", + "allocId-1" + ); + + for (ShardId shardId : shardsInBatch) { + batchAllocator.addShardData( + node1, + "allocId-" + shardId.id(), + shardId, + true, + new ReplicationCheckpoint(shardId, 20, 101, 1, Codec.getDefault().getName()), + null + ); + } + + allocateAllUnassignedBatch(routingAllocation); + + // Verify the throttling decider was honoured: the node must not recover more primaries than the initial concurrent recoveries setting allows + assertEquals(1, routingAllocation.routingNodes().getInitialPrimariesIncomingRecoveries(node1.getId())); + List<ShardRouting> initializingShards = routingAllocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING); + assertEquals(1, initializingShards.size()); + Set<String> nodesWithInitialisingShards = initializingShards.stream().map(ShardRouting::currentNodeId).collect(Collectors.toSet()); + assertEquals(1, nodesWithInitialisingShards.size()); + assertEquals(Collections.singleton(node1.getId()), nodesWithInitialisingShards); + List<ShardRouting> ignoredShards = routingAllocation.routingNodes().unassigned().ignored(); + assertEquals(1, ignoredShards.size()); + assertEquals(UnassignedInfo.AllocationStatus.DECIDERS_THROTTLED, ignoredShards.get(0).unassignedInfo().getLastAllocationStatus()); } private RoutingAllocation routingAllocationWithOnePrimary( @@ -235,7 +301,7 @@ private RoutingAllocation routingAllocationWithMultiplePrimaries( .routingTable(routingTableBuilder.build()) .nodes(DiscoveryNodes.builder().add(node1).add(node2).add(node3)) .build(); - return new RoutingAllocation(deciders, new RoutingNodes(state, false), state, null, null, System.nanoTime()); + return new RoutingAllocation(deciders, new RoutingNodes(state, false), state, ClusterInfo.EMPTY, null, System.nanoTime()); } class TestBatchAllocator extends PrimaryShardBatchAllocator { diff --git a/server/src/test/java/org/opensearch/gateway/ReplicaShardBatchAllocatorTests.java b/server/src/test/java/org/opensearch/gateway/ReplicaShardBatchAllocatorTests.java index 464038c93228b..2e148c2bc8130 100644 --- a/server/src/test/java/org/opensearch/gateway/ReplicaShardBatchAllocatorTests.java +++ b/server/src/test/java/org/opensearch/gateway/ReplicaShardBatchAllocatorTests.java @@ -28,16 +28,19 @@ import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.cluster.routing.TestShardRouting; import org.opensearch.cluster.routing.UnassignedInfo; +import org.opensearch.cluster.routing.allocation.AllocateUnassignedDecision; import org.opensearch.cluster.routing.allocation.RoutingAllocation; import org.opensearch.cluster.routing.allocation.decider.AllocationDecider; import org.opensearch.cluster.routing.allocation.decider.AllocationDeciders; import org.opensearch.cluster.routing.allocation.decider.Decision; import org.opensearch.cluster.routing.allocation.decider.SameShardAllocationDecider; +import org.opensearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; import org.opensearch.common.Nullable; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.set.Sets; +import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.engine.Engine;
import org.opensearch.index.seqno.ReplicationTracker; @@ -72,6 +75,7 @@ public class ReplicaShardBatchAllocatorTests extends OpenSearchAllocationTestCas private static final org.apache.lucene.util.Version MIN_SUPPORTED_LUCENE_VERSION = org.opensearch.Version.CURRENT .minimumIndexCompatibilityVersion().luceneVersion; private final ShardId shardId = new ShardId("test", "_na_", 0); + private static Set<ShardId> shardsInBatch; private final DiscoveryNode node1 = newNode("node1"); private final DiscoveryNode node2 = newNode("node2"); private final DiscoveryNode node3 = newNode("node3"); @@ -83,6 +87,14 @@ public void buildTestAllocator() { this.testBatchAllocator = new TestBatchAllocator(); } + public static void setUpShards(int numberOfShards) { + shardsInBatch = new HashSet<>(); + for (int shardNumber = 0; shardNumber < numberOfShards; shardNumber++) { + ShardId shardId = new ShardId("test", "_na_", shardNumber); + shardsInBatch.add(shardId); + } + } + private void allocateAllUnassignedBatch(final RoutingAllocation allocation) { final RoutingNodes.UnassignedShards.UnassignedIterator iterator = allocation.routingNodes().unassigned().iterator(); List<ShardRouting> shardToBatch = new ArrayList<>(); @@ -115,8 +127,6 @@ public void testAsyncFetchWithNoShardOnIndexCreation() { ); testBatchAllocator.clean(); allocateAllUnassignedBatch(allocation); - assertThat(testBatchAllocator.getFetchDataCalledAndClean(), equalTo(false)); - assertThat(testBatchAllocator.getShardEligibleFetchDataCountAndClean(), equalTo(0)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(1)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).get(0).shardId(), equalTo(shardId)); } @@ -634,6 +644,60 @@ public void testDoNotCancelForBrokenNode() { assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED), empty()); } + public void testAllocateUnassignedBatchThrottlingAllocationDeciderIsHonoured() throws InterruptedException { + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + AllocationDeciders allocationDeciders = randomAllocationDeciders( + Settings.builder() + .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getKey(), 1) + .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING.getKey(), 1) + .build(), + clusterSettings, + random() + ); + setUpShards(2); + final RoutingAllocation routingAllocation = twoPrimaryAndOneUnAssignedReplica(allocationDeciders); + for (ShardId shardIdFromBatch : shardsInBatch) { + testBatchAllocator.addShardData( + node1, + shardIdFromBatch, + "MATCH", + null, + new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION) + ) + .addShardData( + node2, + shardIdFromBatch, + "NO_MATCH", + null, + new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION) + ) + .addShardData( + node3, + shardIdFromBatch, + "MATCH", + null, + new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION) + ); + } + allocateAllUnassignedBatch(routingAllocation); + // Verify the throttling decider was honoured: incoming recoveries on a node must stay + // within the allowed concurrent recoveries + assertEquals(0, routingAllocation.routingNodes().getIncomingRecoveries(node2.getId())); + assertEquals(1, routingAllocation.routingNodes().getIncomingRecoveries(node3.getId()));
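+ // node2 offered no sync-id match, so the one permitted incoming recovery lands on node3; the remaining replica is throttled and left ignored.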
+ List<ShardRouting> initializingShards = routingAllocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING); + assertEquals(1, initializingShards.size()); + List<ShardRouting> ignoredShardRoutings = routingAllocation.routingNodes().unassigned().ignored(); + assertEquals(1, ignoredShardRoutings.size()); + // The allocation status of ignored replica shards is not updated after the deciders run; they are simply marked as ignored. + assertEquals(UnassignedInfo.AllocationStatus.NO_ATTEMPT, ignoredShardRoutings.get(0).unassignedInfo().getLastAllocationStatus()); + AllocateUnassignedDecision allocateUnassignedDecision = testBatchAllocator.makeAllocationDecision( + ignoredShardRoutings.get(0), + routingAllocation, + logger + ); + assertEquals(UnassignedInfo.AllocationStatus.DECIDERS_THROTTLED, allocateUnassignedDecision.getAllocationStatus()); + } + private RoutingAllocation onePrimaryOnNode1And1Replica(AllocationDeciders deciders) { return onePrimaryOnNode1And1Replica(deciders, Settings.EMPTY, UnassignedInfo.Reason.CLUSTER_RECOVERED); } @@ -692,6 +756,77 @@ private RoutingAllocation onePrimaryOnNode1And1Replica(AllocationDeciders decide ); } + private RoutingAllocation twoPrimaryAndOneUnAssignedReplica(AllocationDeciders deciders) throws InterruptedException { + Map<ShardId, ShardRouting> shardIdShardRoutingMap = new HashMap<>(); + Index index = shardId.getIndex(); + + // Create a started ShardRouting for each primary shard + for (ShardId shardIdFromBatch : shardsInBatch) { + shardIdShardRoutingMap.put( + shardIdFromBatch, + TestShardRouting.newShardRouting(shardIdFromBatch, node1.getId(), true, ShardRoutingState.STARTED) + ); + } + + // Create Index Metadata + IndexMetadata.Builder indexMetadata = IndexMetadata.builder(index.getName()) + .settings(settings(Version.CURRENT).put(Settings.EMPTY)) + .numberOfShards(2) + .numberOfReplicas(1); + for (ShardId shardIdFromBatch : shardsInBatch) { + indexMetadata.putInSyncAllocationIds( + shardIdFromBatch.id(), + Sets.newHashSet(shardIdShardRoutingMap.get(shardIdFromBatch).allocationId().getId()) + ); + } + Metadata metadata = Metadata.builder().put(indexMetadata).build(); + + // Create Index Routing table + IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(index); + for (ShardId shardIdFromBatch : shardsInBatch) { + IndexShardRoutingTable.Builder indexShardRoutingTableBuilder = new IndexShardRoutingTable.Builder(shardIdFromBatch); + // Add a primary shard in started state + indexShardRoutingTableBuilder.addShard(shardIdShardRoutingMap.get(shardIdFromBatch)); + // Add replicas of the primary shard in unassigned state.
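+ // Each replica carries Reason.CLUSTER_RECOVERED with AllocationStatus.NO_ATTEMPT, i.e. a shard still waiting for its first allocation attempt.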
+ for (int i = 0; i < 1; i++) { + indexShardRoutingTableBuilder.addShard( + ShardRouting.newUnassigned( + shardIdFromBatch, + false, + RecoverySource.PeerRecoverySource.INSTANCE, + new UnassignedInfo( + UnassignedInfo.Reason.CLUSTER_RECOVERED, + null, + null, + 0, + System.nanoTime(), + System.currentTimeMillis(), + false, + UnassignedInfo.AllocationStatus.NO_ATTEMPT, + Collections.emptySet() + ) + ) + ); + } + indexRoutingTableBuilder.addIndexShard(indexShardRoutingTableBuilder.build()); + } + + RoutingTable routingTable = RoutingTable.builder().add(indexRoutingTableBuilder.build()).build(); + ClusterState state = ClusterState.builder(org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metadata(metadata) .routingTable(routingTable) .nodes(DiscoveryNodes.builder().add(node1).add(node2).add(node3)) .build(); + return new RoutingAllocation( + deciders, + new RoutingNodes(state, false), + state, + ClusterInfo.EMPTY, + SnapshotShardSizeInfo.EMPTY, + System.nanoTime() + ); + } + private RoutingAllocation onePrimaryOnNode1And1ReplicaRecovering(AllocationDeciders deciders, UnassignedInfo unassignedInfo) { ShardRouting primaryShard = TestShardRouting.newShardRouting(shardId, node1.getId(), true, ShardRoutingState.STARTED); Metadata metadata = Metadata.builder() @@ -755,7 +890,7 @@ static String randomSyncId() { } class TestBatchAllocator extends ReplicaShardBatchAllocator { - private Map<DiscoveryNode, TransportNodesListShardStoreMetadataBatch.NodeStoreFilesMetadata> data = null; + private Map<DiscoveryNode, TransportNodesListShardStoreMetadataBatch.NodeStoreFilesMetadataBatch> data = null; private AtomicBoolean fetchDataCalled = new AtomicBoolean(false); private AtomicInteger eligibleShardFetchDataCount = new AtomicInteger(0); @@ -800,6 +935,55 @@ public TestBatchAllocator addData( } data.put( node, + new TransportNodesListShardStoreMetadataBatch.NodeStoreFilesMetadataBatch( + node, + Map.of( + shardId, + new TransportNodesListShardStoreMetadataBatch.NodeStoreFilesMetadata( + new TransportNodesListShardStoreMetadataHelper.StoreFilesMetadata( + shardId, + new Store.MetadataSnapshot(unmodifiableMap(filesAsMap), unmodifiableMap(commitData), randomInt()), + peerRecoveryRetentionLeases + ), + storeFileFetchException + ) + ) + ) + ); + return this; + } + + public TestBatchAllocator addShardData( + DiscoveryNode node, + ShardId shardId, + String syncId, + @Nullable Exception storeFileFetchException, + StoreFileMetadata... files + ) { + return addShardData(node, Collections.emptyList(), shardId, syncId, storeFileFetchException, files); + }
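+ + // The batch-aware variant below merges each shard's store metadata into the node's NodeStoreFilesMetadataBatch entry, so a single node response can cover every shard in the batch.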
+ public TestBatchAllocator addShardData( + DiscoveryNode node, + List<RetentionLease> peerRecoveryRetentionLeases, + ShardId shardId, + String syncId, + @Nullable Exception storeFileFetchException, + StoreFileMetadata... files + ) { + if (data == null) { + data = new HashMap<>(); + } + Map<String, StoreFileMetadata> filesAsMap = new HashMap<>(); + for (StoreFileMetadata file : files) { + filesAsMap.put(file.name(), file); + } + Map<String, String> commitData = new HashMap<>(); + if (syncId != null) { + commitData.put(Engine.SYNC_COMMIT_ID, syncId); + } + + TransportNodesListShardStoreMetadataBatch.NodeStoreFilesMetadata nodeStoreFilesMetadata = new TransportNodesListShardStoreMetadataBatch.NodeStoreFilesMetadata( new TransportNodesListShardStoreMetadataHelper.StoreFilesMetadata( shardId, @@ -807,8 +991,19 @@ public TestBatchAllocator addData( peerRecoveryRetentionLeases ), storeFileFetchException - ) + ); + Map<ShardId, TransportNodesListShardStoreMetadataBatch.NodeStoreFilesMetadata> shardIdNodeStoreFilesMetadataHashMap = + new HashMap<>(); + if (data.containsKey(node)) { + NodeStoreFilesMetadataBatch nodeStoreFilesMetadataBatch = data.get(node); + shardIdNodeStoreFilesMetadataHashMap.putAll(nodeStoreFilesMetadataBatch.getNodeStoreFilesMetadataBatch()); + } + shardIdNodeStoreFilesMetadataHashMap.put(shardId, nodeStoreFilesMetadata); + data.put( + node, + new TransportNodesListShardStoreMetadataBatch.NodeStoreFilesMetadataBatch(node, shardIdNodeStoreFilesMetadataHashMap) ); + return this; } @@ -820,25 +1015,7 @@ protected AsyncShardFetch.FetchResult<TransportNodesListShardStoreMetadataBatch.NodeStoreFilesMetadataBatch> fetchData( ) { fetchDataCalled.set(true); eligibleShardFetchDataCount.set(eligibleShards.size()); - Map<DiscoveryNode, TransportNodesListShardStoreMetadataBatch.NodeStoreFilesMetadataBatch> tData = null; - if (data != null) { - tData = new HashMap<>(); - for (Map.Entry<DiscoveryNode, TransportNodesListShardStoreMetadataBatch.NodeStoreFilesMetadata> entry : data.entrySet()) { - Map<ShardId, TransportNodesListShardStoreMetadataBatch.NodeStoreFilesMetadata> shardData = Map.of( - shardId, - entry.getValue() - ); - tData.put( - entry.getKey(), - new TransportNodesListShardStoreMetadataBatch.NodeStoreFilesMetadataBatch(entry.getKey(), shardData) - ); - } - } - return new AsyncShardFetch.FetchResult<>(tData, new HashMap<>() { - { - put(shardId, Collections.emptySet()); - } - }); + return new AsyncShardFetch.FetchResult<>(data, Collections.<ShardId, Set<String>>emptyMap()); } @Override diff --git a/server/src/test/java/org/opensearch/gateway/remote/ClusterMetadataManifestTests.java b/server/src/test/java/org/opensearch/gateway/remote/ClusterMetadataManifestTests.java index 6da8cbff2c688..02471c9cdbbbe 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/ClusterMetadataManifestTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/ClusterMetadataManifestTests.java @@ -37,8 +37,12 @@ import static org.opensearch.gateway.remote.ClusterMetadataManifest.CODEC_V1; import static org.opensearch.gateway.remote.RemoteClusterStateAttributesManager.CLUSTER_BLOCKS; import static org.opensearch.gateway.remote.RemoteClusterStateAttributesManager.DISCOVERY_NODES; -import static org.opensearch.gateway.remote.RemoteClusterStateServiceTests.generateClusterStateWithOneIndex; +import static org.opensearch.gateway.remote.model.RemoteCoordinationMetadata.COORDINATION_METADATA; +import static org.opensearch.gateway.remote.model.RemoteCustomMetadata.CUSTOM_DELIMITER; +import static org.opensearch.gateway.remote.model.RemoteCustomMetadata.CUSTOM_METADATA; import static org.opensearch.gateway.remote.model.RemoteHashesOfConsistentSettings.HASHES_OF_CONSISTENT_SETTINGS; +import static org.opensearch.gateway.remote.model.RemotePersistentSettingsMetadata.SETTING_METADATA; +import static org.opensearch.gateway.remote.model.RemoteTemplatesMetadata.TEMPLATES_METADATA; import static org.opensearch.gateway.remote.model.RemoteTransientSettingsMetadata.TRANSIENT_SETTING_METADATA; public class ClusterMetadataManifestTests extends OpenSearchTestCase { @@ -110,24 +114,22 @@ public void testClusterMetadataManifestXContent() throws IOException { 
.indices(Collections.singletonList(uploadedIndexMetadata)) .previousClusterUUID("prev-cluster-uuid") .clusterUUIDCommitted(true) - .coordinationMetadata(new UploadedMetadataAttribute(RemoteClusterStateService.COORDINATION_METADATA, "coordination-file")) - .settingMetadata(new UploadedMetadataAttribute(RemoteClusterStateService.SETTING_METADATA, "setting-file")) - .templatesMetadata(new UploadedMetadataAttribute(RemoteClusterStateService.TEMPLATES_METADATA, "templates-file")) + .coordinationMetadata(new UploadedMetadataAttribute(COORDINATION_METADATA, "coordination-file")) + .settingMetadata(new UploadedMetadataAttribute(SETTING_METADATA, "setting-file")) + .templatesMetadata(new UploadedMetadataAttribute(TEMPLATES_METADATA, "templates-file")) .customMetadataMap( Collections.unmodifiableList( Arrays.asList( new UploadedMetadataAttribute( - RemoteClusterStateService.CUSTOM_METADATA + RemoteClusterStateService.CUSTOM_DELIMITER - + RepositoriesMetadata.TYPE, + CUSTOM_METADATA + CUSTOM_DELIMITER + RepositoriesMetadata.TYPE, "custom--repositories-file" ), new UploadedMetadataAttribute( - RemoteClusterStateService.CUSTOM_METADATA + RemoteClusterStateService.CUSTOM_DELIMITER + IndexGraveyard.TYPE, + CUSTOM_METADATA + CUSTOM_DELIMITER + IndexGraveyard.TYPE, "custom--index_graveyard-file" ), new UploadedMetadataAttribute( - RemoteClusterStateService.CUSTOM_METADATA + RemoteClusterStateService.CUSTOM_DELIMITER - + WeightedRoutingMetadata.TYPE, + CUSTOM_METADATA + CUSTOM_DELIMITER + WeightedRoutingMetadata.TYPE, "custom--weighted_routing_netadata-file" ) ) @@ -159,24 +161,22 @@ public void testClusterMetadataManifestSerializationEqualsHashCode() { .indices(randomUploadedIndexMetadataList()) .previousClusterUUID("yfObdx8KSMKKrXf8UyHhM") .clusterUUIDCommitted(true) - .coordinationMetadata(new UploadedMetadataAttribute(RemoteClusterStateService.COORDINATION_METADATA, "coordination-file")) - .settingMetadata(new UploadedMetadataAttribute(RemoteClusterStateService.SETTING_METADATA, "setting-file")) - .templatesMetadata(new UploadedMetadataAttribute(RemoteClusterStateService.TEMPLATES_METADATA, "templates-file")) + .coordinationMetadata(new UploadedMetadataAttribute(COORDINATION_METADATA, "coordination-file")) + .settingMetadata(new UploadedMetadataAttribute(SETTING_METADATA, "setting-file")) + .templatesMetadata(new UploadedMetadataAttribute(TEMPLATES_METADATA, "templates-file")) .customMetadataMap( Collections.unmodifiableList( Arrays.asList( new UploadedMetadataAttribute( - RemoteClusterStateService.CUSTOM_METADATA + RemoteClusterStateService.CUSTOM_DELIMITER - + RepositoriesMetadata.TYPE, + CUSTOM_METADATA + CUSTOM_DELIMITER + RepositoriesMetadata.TYPE, "custom--repositories-file" ), new UploadedMetadataAttribute( - RemoteClusterStateService.CUSTOM_METADATA + RemoteClusterStateService.CUSTOM_DELIMITER + IndexGraveyard.TYPE, + CUSTOM_METADATA + CUSTOM_DELIMITER + IndexGraveyard.TYPE, "custom--index_graveyard-file" ), new UploadedMetadataAttribute( - RemoteClusterStateService.CUSTOM_METADATA + RemoteClusterStateService.CUSTOM_DELIMITER - + WeightedRoutingMetadata.TYPE, + CUSTOM_METADATA + CUSTOM_DELIMITER + WeightedRoutingMetadata.TYPE, "custom--weighted_routing_netadata-file" ) ) @@ -188,7 +188,12 @@ public void testClusterMetadataManifestSerializationEqualsHashCode() { .transientSettingsMetadata(new UploadedMetadataAttribute(TRANSIENT_SETTING_METADATA, "transient-settings-file")) .hashesOfConsistentSettings(new UploadedMetadataAttribute(HASHES_OF_CONSISTENT_SETTINGS, 
"hashes-of-consistent-settings-file")) .clusterStateCustomMetadataMap(Collections.emptyMap()) - .diffManifest(new ClusterStateDiffManifest(generateClusterStateWithOneIndex().build(), ClusterState.EMPTY_STATE)) + .diffManifest( + new ClusterStateDiffManifest( + RemoteClusterStateServiceTests.generateClusterStateWithOneIndex().build(), + ClusterState.EMPTY_STATE + ) + ) .build(); { // Mutate Cluster Term EqualsHashCodeTestUtils.checkEqualsAndHashCode( @@ -494,17 +499,15 @@ public void testClusterMetadataManifestXContentV2() throws IOException { Collections.unmodifiableList( Arrays.asList( new UploadedMetadataAttribute( - RemoteClusterStateService.CUSTOM_METADATA + RemoteClusterStateService.CUSTOM_DELIMITER - + RepositoriesMetadata.TYPE, + CUSTOM_METADATA + CUSTOM_DELIMITER + RepositoriesMetadata.TYPE, "custom--repositories-file" ), new UploadedMetadataAttribute( - RemoteClusterStateService.CUSTOM_METADATA + RemoteClusterStateService.CUSTOM_DELIMITER + IndexGraveyard.TYPE, + CUSTOM_METADATA + CUSTOM_DELIMITER + IndexGraveyard.TYPE, "custom--index_graveyard-file" ), new UploadedMetadataAttribute( - RemoteClusterStateService.CUSTOM_METADATA + RemoteClusterStateService.CUSTOM_DELIMITER - + WeightedRoutingMetadata.TYPE, + CUSTOM_METADATA + CUSTOM_DELIMITER + WeightedRoutingMetadata.TYPE, "custom--weighted_routing_netadata-file" ) ) @@ -517,7 +520,12 @@ public void testClusterMetadataManifestXContentV2() throws IOException { .transientSettingsMetadata(uploadedMetadataAttribute) .hashesOfConsistentSettings(uploadedMetadataAttribute) .clusterStateCustomMetadataMap(Collections.emptyMap()) - .diffManifest(new ClusterStateDiffManifest(generateClusterStateWithOneIndex().build(), ClusterState.EMPTY_STATE)) + .diffManifest( + new ClusterStateDiffManifest( + RemoteClusterStateServiceTests.generateClusterStateWithOneIndex().build(), + ClusterState.EMPTY_STATE + ) + ) .build(); final XContentBuilder builder = JsonXContent.contentBuilder(); builder.startObject(); @@ -558,17 +566,15 @@ public void testClusterMetadataManifestXContentV2WithoutEphemeral() throws IOExc Collections.unmodifiableList( Arrays.asList( new UploadedMetadataAttribute( - RemoteClusterStateService.CUSTOM_METADATA + RemoteClusterStateService.CUSTOM_DELIMITER - + RepositoriesMetadata.TYPE, + CUSTOM_METADATA + CUSTOM_DELIMITER + RepositoriesMetadata.TYPE, "custom--repositories-file" ), new UploadedMetadataAttribute( - RemoteClusterStateService.CUSTOM_METADATA + RemoteClusterStateService.CUSTOM_DELIMITER + IndexGraveyard.TYPE, + CUSTOM_METADATA + CUSTOM_DELIMITER + IndexGraveyard.TYPE, "custom--index_graveyard-file" ), new UploadedMetadataAttribute( - RemoteClusterStateService.CUSTOM_METADATA + RemoteClusterStateService.CUSTOM_DELIMITER - + WeightedRoutingMetadata.TYPE, + CUSTOM_METADATA + CUSTOM_DELIMITER + WeightedRoutingMetadata.TYPE, "custom--weighted_routing_netadata-file" ) ) @@ -587,7 +593,7 @@ public void testClusterMetadataManifestXContentV2WithoutEphemeral() throws IOExc } } - private List randomUploadedIndexMetadataList() { + public static List randomUploadedIndexMetadataList() { final int size = randomIntBetween(1, 10); final List uploadedIndexMetadataList = new ArrayList<>(size); while (uploadedIndexMetadataList.size() < size) { @@ -596,7 +602,7 @@ private List randomUploadedIndexMetadataList() { return uploadedIndexMetadataList; } - private UploadedIndexMetadata randomUploadedIndexMetadata() { + private static UploadedIndexMetadata randomUploadedIndexMetadata() { return new UploadedIndexMetadata(randomAlphaOfLength(10), 
randomAlphaOfLength(10), randomAlphaOfLength(10)); } diff --git a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateAttributesManagerTests.java b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateAttributesManagerTests.java new file mode 100644 index 0000000000000..41e1546ead164 --- /dev/null +++ b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateAttributesManagerTests.java @@ -0,0 +1,319 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.gateway.remote; + +import org.opensearch.Version; +import org.opensearch.action.LatchedActionListener; +import org.opensearch.cluster.AbstractNamedDiffable; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.ClusterState.Custom; +import org.opensearch.cluster.DiffableUtils; +import org.opensearch.cluster.block.ClusterBlocks; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.common.CheckedRunnable; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.compress.Compressor; +import org.opensearch.core.compress.NoneCompressor; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.gateway.remote.model.RemoteClusterBlocks; +import org.opensearch.gateway.remote.model.RemoteDiscoveryNodes; +import org.opensearch.gateway.remote.model.RemoteReadResult; +import org.opensearch.index.translog.transfer.BlobStoreTransferService; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicReference; + +import static java.util.Collections.emptyList; +import static org.opensearch.gateway.remote.RemoteClusterStateAttributesManager.DISCOVERY_NODES; +import static org.opensearch.gateway.remote.model.RemoteClusterBlocks.CLUSTER_BLOCKS; +import static org.opensearch.gateway.remote.model.RemoteClusterBlocks.CLUSTER_BLOCKS_FORMAT; +import static org.opensearch.gateway.remote.model.RemoteClusterBlocksTests.randomClusterBlocks; +import static org.opensearch.gateway.remote.model.RemoteDiscoveryNodes.DISCOVERY_NODES_FORMAT; +import static org.opensearch.gateway.remote.model.RemoteDiscoveryNodesTests.getDiscoveryNodes; +import static org.hamcrest.Matchers.is; +import static org.mockito.ArgumentMatchers.anyIterable; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class RemoteClusterStateAttributesManagerTests extends OpenSearchTestCase { + private RemoteClusterStateAttributesManager remoteClusterStateAttributesManager; + private BlobStoreTransferService blobStoreTransferService; + private 
BlobStoreRepository blobStoreRepository; + private Compressor compressor; + private ThreadPool threadPool = new TestThreadPool(RemoteClusterStateAttributesManagerTests.class.getName()); + + @Before + public void setup() throws Exception { + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(emptyList()); + blobStoreRepository = mock(BlobStoreRepository.class); + blobStoreTransferService = mock(BlobStoreTransferService.class); + compressor = new NoneCompressor(); + + remoteClusterStateAttributesManager = new RemoteClusterStateAttributesManager( + "test-cluster", + blobStoreRepository, + blobStoreTransferService, + writableRegistry(), + threadPool + ); + } + + @After + public void tearDown() throws Exception { + super.tearDown(); + threadPool.shutdown(); + } + + public void testGetAsyncMetadataReadAction_DiscoveryNodes() throws IOException { + DiscoveryNodes discoveryNodes = getDiscoveryNodes(); + String fileName = randomAlphaOfLength(10); + when(blobStoreTransferService.downloadBlob(anyIterable(), anyString())).thenReturn( + DISCOVERY_NODES_FORMAT.serialize(discoveryNodes, fileName, compressor).streamInput() + ); + RemoteDiscoveryNodes remoteObjForDownload = new RemoteDiscoveryNodes(fileName, "cluster-uuid", compressor); + CountDownLatch latch = new CountDownLatch(1); + AtomicReference<DiscoveryNodes> readDiscoveryNodes = new AtomicReference<>(); + LatchedActionListener<RemoteReadResult> assertingListener = new LatchedActionListener<>( + ActionListener.wrap(response -> readDiscoveryNodes.set((DiscoveryNodes) response.getObj()), Assert::assertNull), + latch + ); + CheckedRunnable<IOException> runnable = remoteClusterStateAttributesManager.getAsyncMetadataReadAction( + DISCOVERY_NODES, + remoteObjForDownload, + assertingListener + ); + + try { + runnable.run(); + latch.await(); + assertEquals(discoveryNodes.getSize(), readDiscoveryNodes.get().getSize()); + discoveryNodes.getNodes().forEach((nodeId, node) -> assertEquals(readDiscoveryNodes.get().get(nodeId), node)); + assertEquals(discoveryNodes.getClusterManagerNodeId(), readDiscoveryNodes.get().getClusterManagerNodeId()); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + public void testGetAsyncMetadataReadAction_ClusterBlocks() throws IOException { + ClusterBlocks clusterBlocks = randomClusterBlocks(); + String fileName = randomAlphaOfLength(10); + when(blobStoreTransferService.downloadBlob(anyIterable(), anyString())).thenReturn( + CLUSTER_BLOCKS_FORMAT.serialize(clusterBlocks, fileName, compressor).streamInput() + ); + RemoteClusterBlocks remoteClusterBlocks = new RemoteClusterBlocks(fileName, "cluster-uuid", compressor); + CountDownLatch latch = new CountDownLatch(1); + AtomicReference<ClusterBlocks> readClusterBlocks = new AtomicReference<>(); + LatchedActionListener<RemoteReadResult> assertingListener = new LatchedActionListener<>( + ActionListener.wrap(response -> readClusterBlocks.set((ClusterBlocks) response.getObj()), Assert::assertNull), + latch + ); + + CheckedRunnable<IOException> runnable = remoteClusterStateAttributesManager.getAsyncMetadataReadAction( + CLUSTER_BLOCKS, + remoteClusterBlocks, + assertingListener + ); + + try { + runnable.run(); + latch.await(); + assertEquals(clusterBlocks.global(), readClusterBlocks.get().global()); + assertEquals(clusterBlocks.indices().keySet(), readClusterBlocks.get().indices().keySet()); + for (String index : clusterBlocks.indices().keySet()) { + assertEquals(clusterBlocks.indices().get(index), readClusterBlocks.get().indices().get(index)); + } + } catch (Exception e) { + throw new RuntimeException(e); + } + }
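+ + // getUpdatedCustoms diffs the cluster-state customs between two states; the assertions below cover the three cases exercised here: the private test customs skipped entirely, a full upload where every current custom is upserted, and an incremental diff that upserts the changed and added customs and reports the removed type as a delete.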
+ public void testGetUpdatedCustoms() { + Map<String, ClusterState.Custom> previousCustoms = Map.of( + TestCustom1.TYPE, + new TestCustom1("data1"), + TestCustom2.TYPE, + new TestCustom2("data2"), + TestCustom3.TYPE, + new TestCustom3("data3") + ); + ClusterState previousState = ClusterState.builder(new ClusterName("test-cluster")).customs(previousCustoms).build(); + + Map<String, ClusterState.Custom> currentCustoms = Map.of( + TestCustom2.TYPE, + new TestCustom2("data2"), + TestCustom3.TYPE, + new TestCustom3("data3-changed"), + TestCustom4.TYPE, + new TestCustom4("data4") + ); + + ClusterState currentState = ClusterState.builder(new ClusterName("test-cluster")).customs(currentCustoms).build(); + + DiffableUtils.MapDiff<String, ClusterState.Custom, Map<String, ClusterState.Custom>> customsDiff = + remoteClusterStateAttributesManager.getUpdatedCustoms(currentState, previousState, false, randomBoolean()); + assertThat(customsDiff.getUpserts(), is(Collections.emptyMap())); + assertThat(customsDiff.getDeletes(), is(Collections.emptyList())); + + customsDiff = remoteClusterStateAttributesManager.getUpdatedCustoms(currentState, previousState, true, true); + assertThat(customsDiff.getUpserts(), is(currentCustoms)); + assertThat(customsDiff.getDeletes(), is(Collections.emptyList())); + + Map<String, ClusterState.Custom> expectedCustoms = Map.of( + TestCustom3.TYPE, + new TestCustom3("data3-changed"), + TestCustom4.TYPE, + new TestCustom4("data4") + ); + + customsDiff = remoteClusterStateAttributesManager.getUpdatedCustoms(currentState, previousState, true, false); + assertThat(customsDiff.getUpserts(), is(expectedCustoms)); + assertThat(customsDiff.getDeletes(), is(List.of(TestCustom1.TYPE))); + } + + private static abstract class AbstractTestCustom extends AbstractNamedDiffable<Custom> implements ClusterState.Custom { + + private final String value; + + AbstractTestCustom(String value) { + this.value = value; + } + + AbstractTestCustom(StreamInput in) throws IOException { + this.value = in.readString(); + } + + @Override + public Version getMinimalSupportedVersion() { + return Version.CURRENT; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(value); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder; + } + + @Override + public boolean isPrivate() { + return true; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + AbstractTestCustom that = (AbstractTestCustom) o; + + if (!value.equals(that.value)) return false; + + return true; + } + + @Override + public int hashCode() { + return value.hashCode(); + } + } + + private static class TestCustom1 extends AbstractTestCustom { + + private static final String TYPE = "custom_1"; + + TestCustom1(String value) { + super(value); + } + + TestCustom1(StreamInput in) throws IOException { + super(in); + } + + @Override + public String getWriteableName() { + return TYPE; + } + } + + private static class TestCustom2 extends AbstractTestCustom { + + private static final String TYPE = "custom_2"; + + TestCustom2(String value) { + super(value); + } + + TestCustom2(StreamInput in) throws IOException { + super(in); + } + + @Override + public String getWriteableName() { + return TYPE; + } + } + + private static class TestCustom3 extends AbstractTestCustom { + + private static final String TYPE = "custom_3"; + + TestCustom3(String value) { + super(value); + } + + 
TestCustom3(StreamInput in) throws IOException { + super(in); + } + + @Override + public String getWriteableName() { + return TYPE; + } + } + + private static class TestCustom4 extends AbstractTestCustom { + + private static final String TYPE = "custom_4"; + + TestCustom4(String value) { + super(value); + } + + TestCustom4(StreamInput in) throws IOException { + super(in); + } + + @Override + public String getWriteableName() { + return TYPE; + } + } +} diff --git a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManagerTests.java b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManagerTests.java index 24fd1b164a4ff..ec7e3c1ce81d3 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManagerTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManagerTests.java @@ -12,6 +12,9 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.remote.InternalRemoteRoutingTableService; +import org.opensearch.cluster.routing.remote.NoopRemoteRoutingTableService; +import org.opensearch.cluster.routing.remote.RemoteRoutingTableService; import org.opensearch.cluster.service.ClusterApplierService; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.blobstore.BlobContainer; @@ -54,21 +57,19 @@ import static org.opensearch.gateway.remote.RemoteClusterStateCleanupManager.REMOTE_CLUSTER_STATE_CLEANUP_INTERVAL_SETTING; import static org.opensearch.gateway.remote.RemoteClusterStateCleanupManager.RETAINED_MANIFESTS; import static org.opensearch.gateway.remote.RemoteClusterStateCleanupManager.SKIP_CLEANUP_STATE_CHANGES; -import static org.opensearch.gateway.remote.RemoteClusterStateService.CLUSTER_STATE_PATH_TOKEN; -import static org.opensearch.gateway.remote.RemoteClusterStateService.COORDINATION_METADATA; -import static org.opensearch.gateway.remote.RemoteClusterStateService.DELIMITER; -import static org.opensearch.gateway.remote.RemoteClusterStateService.GLOBAL_METADATA_PATH_TOKEN; -import static org.opensearch.gateway.remote.RemoteClusterStateService.INDEX_PATH_TOKEN; -import static org.opensearch.gateway.remote.RemoteClusterStateService.MANIFEST_FILE_PREFIX; -import static org.opensearch.gateway.remote.RemoteClusterStateService.MANIFEST_PATH_TOKEN; -import static org.opensearch.gateway.remote.RemoteClusterStateService.SETTING_METADATA; -import static org.opensearch.gateway.remote.RemoteClusterStateService.TEMPLATES_METADATA; -import static org.opensearch.gateway.remote.RemoteClusterStateService.encodeString; -import static org.opensearch.gateway.remote.RemoteClusterStateServiceTests.generateClusterStateWithOneIndex; -import static org.opensearch.gateway.remote.RemoteClusterStateServiceTests.nodesWithLocalNodeClusterManager; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.DELIMITER; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.GLOBAL_METADATA_PATH_TOKEN; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.encodeString; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.getFormattedIndexFileName; +import static org.opensearch.gateway.remote.model.RemoteClusterMetadataManifest.MANIFEST; +import static 
org.opensearch.gateway.remote.model.RemoteCoordinationMetadata.COORDINATION_METADATA; +import static org.opensearch.gateway.remote.model.RemotePersistentSettingsMetadata.SETTING_METADATA; +import static org.opensearch.gateway.remote.model.RemoteTemplatesMetadata.TEMPLATES_METADATA; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.eq; @@ -92,6 +93,9 @@ public class RemoteClusterStateCleanupManagerTests extends OpenSearchTestCase { private ClusterState clusterState; private Metadata metadata; private RemoteClusterStateService remoteClusterStateService; + private RemoteManifestManager remoteManifestManager; + private RemoteRoutingTableService remoteRoutingTableService; + private ClusterService clusterService; private final ThreadPool threadPool = new TestThreadPool(getClass().getName()); @Before @@ -113,6 +117,7 @@ public void setup() { Settings settings = Settings.builder() .put("node.attr." + REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, "remote_store_repository") + .put("node.attr." + REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY, "routing_repository") .put(stateRepoTypeAttributeKey, FsRepository.TYPE) .put(stateRepoSettingsAttributeKeyPrefix + "location", "randomRepoPath") .put(RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true) @@ -122,7 +127,7 @@ public void setup() { clusterApplierService = mock(ClusterApplierService.class); clusterState = mock(ClusterState.class); metadata = mock(Metadata.class); - ClusterService clusterService = mock(ClusterService.class); + clusterService = mock(ClusterService.class); when(clusterService.getClusterSettings()).thenReturn(clusterSettings); when(clusterState.getClusterName()).thenReturn(new ClusterName("test")); when(metadata.clusterUUID()).thenReturn("testUUID"); @@ -135,11 +140,19 @@ public void setup() { when(blobStoreRepository.blobStore()).thenReturn(blobStore); when(repositoriesService.repository("remote_store_repository")).thenReturn(blobStoreRepository); + remoteManifestManager = mock(RemoteManifestManager.class); remoteClusterStateService = mock(RemoteClusterStateService.class); + when(remoteClusterStateService.getRemoteManifestManager()).thenReturn(remoteManifestManager); when(remoteClusterStateService.getStats()).thenReturn(new RemotePersistenceStats()); when(remoteClusterStateService.getThreadpool()).thenReturn(threadPool); when(remoteClusterStateService.getBlobStore()).thenReturn(blobStore); - remoteClusterStateCleanupManager = new RemoteClusterStateCleanupManager(remoteClusterStateService, clusterService); + when(remoteClusterStateService.getBlobStoreRepository()).thenReturn(blobStoreRepository); + remoteRoutingTableService = mock(InternalRemoteRoutingTableService.class); + remoteClusterStateCleanupManager = new RemoteClusterStateCleanupManager( + remoteClusterStateService, + clusterService, + remoteRoutingTableService + ); } @After @@ -155,15 +168,17 @@ public void testDeleteClusterMetadata() throws 
IOException { List<BlobMetadata> inactiveBlobs = Arrays.asList( new PlainBlobMetadata("manifest1.dat", 1L), new PlainBlobMetadata("manifest2.dat", 1L), - new PlainBlobMetadata("manifest3.dat", 1L) + new PlainBlobMetadata("manifest3.dat", 1L), + new PlainBlobMetadata("manifest6.dat", 1L) ); List<BlobMetadata> activeBlobs = Arrays.asList( new PlainBlobMetadata("manifest4.dat", 1L), - new PlainBlobMetadata("manifest5.dat", 1L) + new PlainBlobMetadata("manifest5.dat", 1L), + new PlainBlobMetadata("manifest7.dat", 1L) ); - UploadedIndexMetadata index1Metadata = new UploadedIndexMetadata("index1", "indexUUID1", "index_metadata1"); - UploadedIndexMetadata index2Metadata = new UploadedIndexMetadata("index2", "indexUUID2", "index_metadata2"); - UploadedIndexMetadata index1UpdatedMetadata = new UploadedIndexMetadata("index1", "indexUUID1", "index_metadata1_updated"); + UploadedIndexMetadata index1Metadata = new UploadedIndexMetadata("index1", "indexUUID1", "index_metadata1__1"); + UploadedIndexMetadata index2Metadata = new UploadedIndexMetadata("index2", "indexUUID2", "index_metadata2__2"); + UploadedIndexMetadata index1UpdatedMetadata = new UploadedIndexMetadata("index1", "indexUUID1", "index_metadata1_updated__2"); UploadedMetadataAttribute coordinationMetadata = new UploadedMetadataAttribute(COORDINATION_METADATA, "coordination_metadata"); UploadedMetadataAttribute templateMetadata = new UploadedMetadataAttribute(TEMPLATES_METADATA, "template_metadata"); UploadedMetadataAttribute settingMetadata = new UploadedMetadataAttribute(SETTING_METADATA, "settings_metadata"); @@ -199,45 +214,157 @@ public void testDeleteClusterMetadata() throws IOException { .settingMetadata(settingMetadataUpdated) .build(); + UploadedIndexMetadata index3Metadata = new UploadedIndexMetadata("index3", "indexUUID3", "index_metadata3__2"); + UploadedIndexMetadata index4Metadata = new UploadedIndexMetadata("index4", "indexUUID4", "index_metadata4__2"); + List<UploadedIndexMetadata> indicesRouting1 = List.of(index3Metadata, index4Metadata); + List<UploadedIndexMetadata> indicesRouting2 = List.of(index4Metadata); + ClusterMetadataManifest manifest6 = ClusterMetadataManifest.builder() + .indices(List.of(index1Metadata)) + .coordinationMetadata(coordinationMetadataUpdated) + .templatesMetadata(templateMetadataUpdated) + .settingMetadata(settingMetadataUpdated) + .clusterTerm(1L) + .stateVersion(1L) + .codecVersion(CODEC_V2) + .stateUUID(randomAlphaOfLength(10)) + .clusterUUID(clusterUUID) + .nodeId("nodeA") + .opensearchVersion(VersionUtils.randomOpenSearchVersion(random())) + .previousClusterUUID(ClusterState.UNKNOWN_UUID) + .committed(true) + .routingTableVersion(0L) + .indicesRouting(indicesRouting1) + .build(); + ClusterMetadataManifest manifest7 = ClusterMetadataManifest.builder() + .indices(List.of(index2Metadata)) + .coordinationMetadata(coordinationMetadataUpdated) + .templatesMetadata(templateMetadataUpdated) + .settingMetadata(settingMetadataUpdated) + .clusterTerm(1L) + .stateVersion(1L) + .codecVersion(CODEC_V2) + .stateUUID(randomAlphaOfLength(10)) + .clusterUUID(clusterUUID) + .nodeId("nodeA") + .opensearchVersion(VersionUtils.randomOpenSearchVersion(random())) + .previousClusterUUID(ClusterState.UNKNOWN_UUID) + .committed(true) + .routingTableVersion(0L) + .indicesRouting(indicesRouting2) + .build(); + // the active manifests hold references to index1Updated, index2, settingsUpdated, coordinationUpdated, templates, templatesUpdated ClusterMetadataManifest manifest4 = ClusterMetadataManifest.builder(manifest3) .coordinationMetadata(coordinationMetadataUpdated) .build(); ClusterMetadataManifest manifest5 = ClusterMetadataManifest.builder(manifest4).templatesMetadata(templateMetadataUpdated).build();
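+ // Stubbed fetch order: the three active manifests (4, 5, 7) are returned first, then the stale ones (1, 2, 3, 6).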
- when(remoteClusterStateService.fetchRemoteClusterMetadataManifest(eq(clusterName), eq(clusterUUID), any())).thenReturn( + when(remoteManifestManager.fetchRemoteClusterMetadataManifest(eq(clusterName), eq(clusterUUID), any())).thenReturn( manifest4, manifest5, + manifest7, manifest1, manifest2, - manifest3 + manifest3, + manifest6 + ); + when(remoteManifestManager.getManifestFolderPath(eq(clusterName), eq(clusterUUID))).thenReturn( + new BlobPath().add(encodeString(clusterName)).add(CLUSTER_STATE_PATH_TOKEN).add(clusterUUID).add(MANIFEST) ); BlobContainer container = mock(BlobContainer.class); when(blobStore.blobContainer(any())).thenReturn(container); doNothing().when(container).deleteBlobsIgnoringIfNotExists(any()); - + remoteClusterStateCleanupManager.start(); remoteClusterStateCleanupManager.deleteClusterMetadata(clusterName, clusterUUID, activeBlobs, inactiveBlobs); verify(container).deleteBlobsIgnoringIfNotExists( List.of( - new BlobPath().add(GLOBAL_METADATA_PATH_TOKEN).buildAsString() + coordinationMetadata.getUploadedFilename() + ".dat", - new BlobPath().add(GLOBAL_METADATA_PATH_TOKEN).buildAsString() + settingMetadata.getUploadedFilename() + ".dat", + // coordination/setting metadata is from CODEC_V2, the uploaded filename will contain the complete path + coordinationMetadata.getUploadedFilename(), + settingMetadata.getUploadedFilename(), new BlobPath().add(GLOBAL_METADATA_PATH_TOKEN).buildAsString() + "global_metadata.dat" ) ); - verify(container).deleteBlobsIgnoringIfNotExists( - List.of( - new BlobPath().add(INDEX_PATH_TOKEN).add(index1Metadata.getIndexUUID()).buildAsString() - + index1Metadata.getUploadedFilePath() - + ".dat" + verify(container).deleteBlobsIgnoringIfNotExists(List.of(getFormattedIndexFileName(index1Metadata.getUploadedFilePath()))); + Set<String> staleManifest = new HashSet<>(); + inactiveBlobs.forEach( + blob -> staleManifest.add( + remoteClusterStateService.getRemoteManifestManager().getManifestFolderPath(clusterName, clusterUUID).buildAsString() + blob.name() ) ); - Set<String> staleManifest = new HashSet<>(); - inactiveBlobs.forEach(blob -> staleManifest.add(new BlobPath().add(MANIFEST_PATH_TOKEN).buildAsString() + blob.name())); verify(container).deleteBlobsIgnoringIfNotExists(new ArrayList<>(staleManifest)); + verify(remoteRoutingTableService).deleteStaleIndexRoutingPaths(List.of(index3Metadata.getUploadedFilename())); + } + + public void testDeleteClusterMetadataNoOpsRoutingTableService() throws IOException { + String clusterUUID = "clusterUUID"; + String clusterName = "test-cluster"; + List<BlobMetadata> inactiveBlobs = Arrays.asList(new PlainBlobMetadata("manifest1.dat", 1L)); + List<BlobMetadata> activeBlobs = Arrays.asList(new PlainBlobMetadata("manifest2.dat", 1L)); + + UploadedMetadataAttribute coordinationMetadata = new UploadedMetadataAttribute(COORDINATION_METADATA, "coordination_metadata"); + UploadedMetadataAttribute templateMetadata = new UploadedMetadataAttribute(TEMPLATES_METADATA, "template_metadata"); + UploadedMetadataAttribute settingMetadata = new UploadedMetadataAttribute(SETTING_METADATA, "settings_metadata"); + UploadedMetadataAttribute coordinationMetadataUpdated = new UploadedMetadataAttribute( + COORDINATION_METADATA, + "coordination_metadata_updated" + ); + + UploadedIndexMetadata index1Metadata = new UploadedIndexMetadata("index1", "indexUUID1", "index_metadata1__2"); + UploadedIndexMetadata index2Metadata = new UploadedIndexMetadata("index2", "indexUUID2", 
"index_metadata2__2"); + List indicesRouting1 = List.of(index1Metadata); + List indicesRouting2 = List.of(index2Metadata); + + ClusterMetadataManifest manifest1 = ClusterMetadataManifest.builder() + .indices(List.of(index1Metadata)) + .coordinationMetadata(coordinationMetadataUpdated) + .templatesMetadata(templateMetadata) + .settingMetadata(settingMetadata) + .clusterTerm(1L) + .stateVersion(1L) + .codecVersion(CODEC_V2) + .stateUUID(randomAlphaOfLength(10)) + .clusterUUID(clusterUUID) + .nodeId("nodeA") + .opensearchVersion(VersionUtils.randomOpenSearchVersion(random())) + .previousClusterUUID(ClusterState.UNKNOWN_UUID) + .committed(true) + .routingTableVersion(0L) + .indicesRouting(indicesRouting1) + .build(); + ClusterMetadataManifest manifest2 = ClusterMetadataManifest.builder(manifest1) + .indices(List.of(index2Metadata)) + .indicesRouting(indicesRouting2) + .build(); + + BlobContainer blobContainer = mock(BlobContainer.class); + doThrow(IOException.class).when(blobContainer).delete(); + when(blobStore.blobContainer(any())).thenReturn(blobContainer); + BlobPath blobPath = new BlobPath().add("random-path"); + when((blobStoreRepository.basePath())).thenReturn(blobPath); + remoteClusterStateCleanupManager.start(); + when(remoteManifestManager.getManifestFolderPath(eq(clusterName), eq(clusterUUID))).thenReturn( + new BlobPath().add(encodeString(clusterName)).add(CLUSTER_STATE_PATH_TOKEN).add(clusterUUID).add(MANIFEST) + ); + when(remoteManifestManager.fetchRemoteClusterMetadataManifest(eq(clusterName), eq(clusterUUID), any())).thenReturn( + manifest2, + manifest1 + ); + remoteRoutingTableService = mock(NoopRemoteRoutingTableService.class); + remoteClusterStateCleanupManager = new RemoteClusterStateCleanupManager( + remoteClusterStateService, + clusterService, + remoteRoutingTableService + ); + remoteClusterStateCleanupManager.start(); + remoteClusterStateCleanupManager.deleteClusterMetadata(clusterName, clusterUUID, activeBlobs, inactiveBlobs); + verify(remoteRoutingTableService).deleteStaleIndexRoutingPaths(List.of(index1Metadata.getUploadedFilename())); } public void testDeleteStaleClusterUUIDs() throws IOException { - final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); + final ClusterState clusterState = RemoteClusterStateServiceTests.generateClusterStateWithOneIndex() + .nodes(RemoteClusterStateServiceTests.nodesWithLocalNodeClusterManager()) + .build(); ClusterMetadataManifest clusterMetadataManifest = ClusterMetadataManifest.builder() .indices(List.of()) .clusterTerm(1L) @@ -268,25 +395,21 @@ public void testDeleteStaleClusterUUIDs() throws IOException { }); when( manifest2Container.listBlobsByPrefixInSortedOrder( - MANIFEST_FILE_PREFIX + DELIMITER, + MANIFEST + DELIMITER, Integer.MAX_VALUE, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC ) ).thenReturn(List.of(new PlainBlobMetadata("mainfest2", 1L))); when( manifest3Container.listBlobsByPrefixInSortedOrder( - MANIFEST_FILE_PREFIX + DELIMITER, + MANIFEST + DELIMITER, Integer.MAX_VALUE, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC ) ).thenReturn(List.of(new PlainBlobMetadata("mainfest3", 1L))); Set uuids = new HashSet<>(Arrays.asList("cluster-uuid1", "cluster-uuid2", "cluster-uuid3")); when(remoteClusterStateService.getAllClusterUUIDs(any())).thenReturn(uuids); - when(remoteClusterStateService.getCusterMetadataBasePath(any(), any())).then( - invocationOnMock -> blobPath.add(encodeString(invocationOnMock.getArgument(0))) - .add(CLUSTER_STATE_PATH_TOKEN) - 
.add((String) invocationOnMock.getArgument(1)) - ); + when(blobStoreRepository.basePath()).thenReturn(blobPath); remoteClusterStateCleanupManager.start(); remoteClusterStateCleanupManager.deleteStaleClusterUUIDs(clusterState, clusterMetadataManifest); try { @@ -306,11 +429,11 @@ public void testRemoteStateCleanupFailureStats() throws IOException { BlobPath blobPath = new BlobPath().add("random-path"); when((blobStoreRepository.basePath())).thenReturn(blobPath); remoteClusterStateCleanupManager.start(); - remoteClusterStateCleanupManager.deleteStaleUUIDsClusterMetadata("cluster1", List.of("cluster-uuid1")); + remoteClusterStateCleanupManager.deleteStaleUUIDsClusterMetadata("cluster1", Arrays.asList("cluster-uuid1")); try { assertBusy(() -> { // wait for stats to get updated - assertNotNull(remoteClusterStateCleanupManager.getStats()); + assertTrue(remoteClusterStateCleanupManager.getStats() != null); assertEquals(0, remoteClusterStateCleanupManager.getStats().getSuccessCount()); assertEquals(1, remoteClusterStateCleanupManager.getStats().getCleanupAttemptFailedCount()); }); @@ -319,6 +442,79 @@ public void testRemoteStateCleanupFailureStats() throws IOException { } } + public void testIndexRoutingFilesCleanupFailureStats() throws Exception { + String clusterUUID = "clusterUUID"; + String clusterName = "test-cluster"; + List inactiveBlobs = Arrays.asList(new PlainBlobMetadata("manifest1.dat", 1L)); + List activeBlobs = Arrays.asList(new PlainBlobMetadata("manifest2.dat", 1L)); + + UploadedMetadataAttribute coordinationMetadata = new UploadedMetadataAttribute(COORDINATION_METADATA, "coordination_metadata"); + UploadedMetadataAttribute templateMetadata = new UploadedMetadataAttribute(TEMPLATES_METADATA, "template_metadata"); + UploadedMetadataAttribute settingMetadata = new UploadedMetadataAttribute(SETTING_METADATA, "settings_metadata"); + UploadedMetadataAttribute coordinationMetadataUpdated = new UploadedMetadataAttribute( + COORDINATION_METADATA, + "coordination_metadata_updated" + ); + + UploadedIndexMetadata index1Metadata = new UploadedIndexMetadata("index1", "indexUUID1", "index_metadata1__2"); + UploadedIndexMetadata index2Metadata = new UploadedIndexMetadata("index2", "indexUUID2", "index_metadata2__2"); + List indicesRouting1 = List.of(index1Metadata); + List indicesRouting2 = List.of(index2Metadata); + + ClusterMetadataManifest manifest1 = ClusterMetadataManifest.builder() + .indices(List.of(index1Metadata)) + .coordinationMetadata(coordinationMetadataUpdated) + .templatesMetadata(templateMetadata) + .settingMetadata(settingMetadata) + .clusterTerm(1L) + .stateVersion(1L) + .codecVersion(CODEC_V2) + .stateUUID(randomAlphaOfLength(10)) + .clusterUUID(clusterUUID) + .nodeId("nodeA") + .opensearchVersion(VersionUtils.randomOpenSearchVersion(random())) + .previousClusterUUID(ClusterState.UNKNOWN_UUID) + .committed(true) + .routingTableVersion(0L) + .indicesRouting(indicesRouting1) + .build(); + ClusterMetadataManifest manifest2 = ClusterMetadataManifest.builder(manifest1) + .indices(List.of(index2Metadata)) + .indicesRouting(indicesRouting2) + .build(); + + BlobContainer blobContainer = mock(BlobContainer.class); + doThrow(IOException.class).when(blobContainer).delete(); + when(blobStore.blobContainer(any())).thenReturn(blobContainer); + + BlobPath blobPath = new BlobPath().add("random-path"); + when((blobStoreRepository.basePath())).thenReturn(blobPath); + remoteClusterStateCleanupManager.start(); + when(remoteManifestManager.getManifestFolderPath(eq(clusterName), 
eq(clusterUUID))).thenReturn( + new BlobPath().add(encodeString(clusterName)).add(CLUSTER_STATE_PATH_TOKEN).add(clusterUUID).add(MANIFEST) + ); + when(remoteManifestManager.fetchRemoteClusterMetadataManifest(eq(clusterName), eq(clusterUUID), any())).thenReturn( + manifest1, + manifest2 + ); + doNothing().when(remoteRoutingTableService).deleteStaleIndexRoutingPaths(any()); + + remoteClusterStateCleanupManager.deleteClusterMetadata(clusterName, clusterUUID, activeBlobs, inactiveBlobs); + assertBusy(() -> { + // wait for stats to get updated + assertNotNull(remoteClusterStateCleanupManager.getStats()); + assertEquals(0, remoteClusterStateCleanupManager.getStats().getIndexRoutingFilesCleanupAttemptFailedCount()); + }); + + doThrow(IOException.class).when(remoteRoutingTableService).deleteStaleIndexRoutingPaths(any()); + remoteClusterStateCleanupManager.deleteClusterMetadata(clusterName, clusterUUID, activeBlobs, inactiveBlobs); + assertBusy(() -> { + // wait for stats to get updated + assertNotNull(remoteClusterStateCleanupManager.getStats()); + assertEquals(1, remoteClusterStateCleanupManager.getStats().getIndexRoutingFilesCleanupAttemptFailedCount()); + }); + } + public void testSingleConcurrentExecutionOfStaleManifestCleanup() throws Exception { BlobContainer blobContainer = mock(BlobContainer.class); when(blobStore.blobContainer(any())).thenReturn(blobContainer); diff --git a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java index 7e10442d011fa..c8fd982fec1e1 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java @@ -12,6 +12,8 @@ import org.opensearch.cluster.ClusterModule; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.RepositoryCleanupInProgress; +import org.opensearch.cluster.RepositoryCleanupInProgress.Entry; import org.opensearch.cluster.coordination.CoordinationMetadata; import org.opensearch.cluster.metadata.IndexGraveyard; import org.opensearch.cluster.metadata.IndexMetadata; @@ -46,14 +48,16 @@ import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedIndexMetadata; import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedMetadataAttribute; +import org.opensearch.gateway.remote.model.RemoteClusterMetadataManifest; import org.opensearch.gateway.remote.model.RemoteClusterStateManifestInfo; +import org.opensearch.gateway.remote.model.RemoteIndexMetadata; import org.opensearch.index.remote.RemoteIndexPathUploader; -import org.opensearch.index.remote.RemoteStoreUtils; import org.opensearch.indices.IndicesModule; import org.opensearch.repositories.FilterRepository; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.RepositoryMissingException; import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.repositories.blobstore.ChecksumWritableBlobStoreFormat; import org.opensearch.repositories.fs.FsRepository; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.TestCustomMetadata; @@ -72,6 +76,7 @@ import java.util.Collections; import java.util.EnumSet; import java.util.HashMap; +import java.util.Iterator; import java.util.List; import java.util.Locale; import java.util.Map; @@ -91,15 +96,17 
@@ import static java.util.stream.Collectors.toList; import static org.opensearch.common.util.FeatureFlags.REMOTE_PUBLICATION_EXPERIMENTAL; import static org.opensearch.gateway.remote.ClusterMetadataManifest.CODEC_V1; -import static org.opensearch.gateway.remote.RemoteClusterStateService.COORDINATION_METADATA; -import static org.opensearch.gateway.remote.RemoteClusterStateService.DELIMITER; -import static org.opensearch.gateway.remote.RemoteClusterStateService.FORMAT_PARAMS; -import static org.opensearch.gateway.remote.RemoteClusterStateService.INDEX_METADATA_CURRENT_CODEC_VERSION; -import static org.opensearch.gateway.remote.RemoteClusterStateService.MANIFEST_FILE_PREFIX; -import static org.opensearch.gateway.remote.RemoteClusterStateService.METADATA_FILE_PREFIX; -import static org.opensearch.gateway.remote.RemoteClusterStateService.SETTING_METADATA; -import static org.opensearch.gateway.remote.RemoteClusterStateService.TEMPLATES_METADATA; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.DELIMITER; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.FORMAT_PARAMS; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.getFormattedIndexFileName; import static org.opensearch.gateway.remote.model.RemoteClusterMetadataManifest.MANIFEST_CURRENT_CODEC_VERSION; +import static org.opensearch.gateway.remote.model.RemoteCoordinationMetadata.COORDINATION_METADATA; +import static org.opensearch.gateway.remote.model.RemoteCoordinationMetadata.COORDINATION_METADATA_FORMAT; +import static org.opensearch.gateway.remote.model.RemoteGlobalMetadata.GLOBAL_METADATA_FORMAT; +import static org.opensearch.gateway.remote.model.RemotePersistentSettingsMetadata.SETTINGS_METADATA_FORMAT; +import static org.opensearch.gateway.remote.model.RemotePersistentSettingsMetadata.SETTING_METADATA; +import static org.opensearch.gateway.remote.model.RemoteTemplatesMetadata.TEMPLATES_METADATA; +import static org.opensearch.gateway.remote.model.RemoteTemplatesMetadata.TEMPLATES_METADATA_FORMAT; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; @@ -125,6 +132,8 @@ public class RemoteClusterStateServiceTests extends OpenSearchTestCase { private RepositoriesService repositoriesService; private BlobStoreRepository blobStoreRepository; private BlobStore blobStore; + private Settings settings; + private boolean publicationEnabled; private final ThreadPool threadPool = new TestThreadPool(getClass().getName()); @Before @@ -144,11 +153,12 @@ public void setup() { "remote_store_repository" ); - Settings settings = Settings.builder() + settings = Settings.builder() .put("node.attr." + REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, "remote_store_repository") .put(stateRepoTypeAttributeKey, FsRepository.TYPE) .put(stateRepoSettingsAttributeKeyPrefix + "location", "randomRepoPath") .put(RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true) + .put("node.attr." 
+ REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY, "routing_repository") .build(); clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); @@ -176,7 +186,8 @@ public void setup() { clusterService, () -> 0L, threadPool, - List.of(new RemoteIndexPathUploader(threadPool, settings, repositoriesServiceSupplier, clusterSettings)) + List.of(new RemoteIndexPathUploader(threadPool, settings, repositoriesServiceSupplier, clusterSettings)), + writableRegistry() ); } @@ -184,6 +195,9 @@ public void setup() { public void teardown() throws Exception { super.tearDown(); remoteClusterStateService.close(); + publicationEnabled = false; + Settings nodeSettings = Settings.builder().build(); + FeatureFlags.initializeFeatureFlags(nodeSettings); threadPool.shutdown(); } @@ -208,7 +222,8 @@ public void testFailInitializationWhenRemoteStateDisabled() { clusterService, () -> 0L, threadPool, - List.of(new RemoteIndexPathUploader(threadPool, settings, repositoriesServiceSupplier, clusterSettings)) + List.of(new RemoteIndexPathUploader(threadPool, settings, repositoriesServiceSupplier, clusterSettings)), + writableRegistry() ) ); } @@ -256,9 +271,67 @@ public void testWriteFullMetadataSuccess() throws IOException { assertThat(manifest.getSettingsMetadata(), notNullValue()); assertThat(manifest.getTemplatesMetadata(), notNullValue()); assertFalse(manifest.getCustomMetadataMap().isEmpty()); + assertThat(manifest.getClusterBlocksMetadata(), nullValue()); + assertThat(manifest.getDiscoveryNodesMetadata(), nullValue()); + assertThat(manifest.getTransientSettingsMetadata(), nullValue()); + assertThat(manifest.getHashesOfConsistentSettings(), nullValue()); + assertThat(manifest.getClusterStateCustomMap().size(), is(0)); + } + + public void testWriteFullMetadataSuccessPublicationEnabled() throws IOException { + // TODO Make the publication flag parameterized + publicationEnabled = true; + Settings nodeSettings = Settings.builder().put(REMOTE_PUBLICATION_EXPERIMENTAL, publicationEnabled).build(); + FeatureFlags.initializeFeatureFlags(nodeSettings); + remoteClusterStateService = new RemoteClusterStateService( + "test-node-id", + repositoriesServiceSupplier, + settings, + clusterService, + () -> 0L, + threadPool, + List.of(new RemoteIndexPathUploader(threadPool, settings, repositoriesServiceSupplier, clusterSettings)), + writableRegistry() + ); + final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()) + .customs(Map.of(RepositoryCleanupInProgress.TYPE, new RepositoryCleanupInProgress(List.of(new Entry("test-repo", 10L))))) + .build(); + mockBlobStoreObjects(); + remoteClusterStateService.start(); + final ClusterMetadataManifest manifest = remoteClusterStateService.writeFullMetadata(clusterState, "prev-cluster-uuid") + .getClusterMetadataManifest(); + final UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "index-uuid", "metadata-filename"); + List indices = List.of(uploadedIndexMetadata); + + final ClusterMetadataManifest expectedManifest = ClusterMetadataManifest.builder() + .indices(indices) + .clusterTerm(1L) + .stateVersion(1L) + .stateUUID("state-uuid") + .clusterUUID("cluster-uuid") + .previousClusterUUID("prev-cluster-uuid") + .build(); + + assertThat(manifest.getIndices().size(), is(1)); + assertThat(manifest.getIndices().get(0).getIndexName(), is(uploadedIndexMetadata.getIndexName())); + assertThat(manifest.getIndices().get(0).getIndexUUID(), is(uploadedIndexMetadata.getIndexUUID())); 
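// Editorial note, not part of the original patch: the experimental-flag handling used by
// this test follows the standard FeatureFlags pattern, sketched here for readers unfamiliar
// with it. The flag is pushed through node settings before constructing the service under
// test, and the teardown() shown earlier in this hunk resets it so other tests stay isolated:
//
//     Settings flagOn = Settings.builder().put(REMOTE_PUBLICATION_EXPERIMENTAL, true).build();
//     FeatureFlags.initializeFeatureFlags(flagOn);          // enable for this test only
//     // ... exercise RemoteClusterStateService with publication enabled ...
//     FeatureFlags.initializeFeatureFlags(Settings.EMPTY);  // reset again in teardown()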
+ assertThat(manifest.getIndices().get(0).getUploadedFilename(), notNullValue()); + assertThat(manifest.getClusterTerm(), is(expectedManifest.getClusterTerm())); + assertThat(manifest.getStateVersion(), is(expectedManifest.getStateVersion())); + assertThat(manifest.getClusterUUID(), is(expectedManifest.getClusterUUID())); + assertThat(manifest.getStateUUID(), is(expectedManifest.getStateUUID())); + assertThat(manifest.getPreviousClusterUUID(), is(expectedManifest.getPreviousClusterUUID())); + assertThat(manifest.getGlobalMetadataFileName(), nullValue()); + assertThat(manifest.getCoordinationMetadata(), notNullValue()); + assertThat(manifest.getSettingsMetadata(), notNullValue()); + assertThat(manifest.getTemplatesMetadata(), notNullValue()); + assertFalse(manifest.getCustomMetadataMap().isEmpty()); + assertThat(manifest.getClusterStateCustomMap().size(), is(1)); + assertThat(manifest.getClusterStateCustomMap().containsKey(RepositoryCleanupInProgress.TYPE), is(true)); } public void testWriteFullMetadataInParallelSuccess() throws IOException { + // TODO Add test with publication flag enabled final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); AsyncMultiStreamBlobContainer container = (AsyncMultiStreamBlobContainer) mockBlobStoreObjects(AsyncMultiStreamBlobContainer.class); @@ -311,7 +384,7 @@ public void testWriteFullMetadataInParallelSuccess() throws IOException { .provideStream(0) .getInputStream() .readAllBytes(); - IndexMetadata writtenIndexMetadata = RemoteClusterStateService.INDEX_METADATA_FORMAT.deserialize( + IndexMetadata writtenIndexMetadata = RemoteIndexMetadata.INDEX_METADATA_FORMAT.deserialize( capturedWriteContext.get("metadata").getFileName(), blobStoreRepository.getNamedXContentRegistry(), new BytesArray(writtenBytes) @@ -350,7 +423,7 @@ public void run() { remoteClusterStateService.start(); assertThrows( - RemoteClusterStateService.RemoteStateTransferException.class, + RemoteStateTransferException.class, () -> remoteClusterStateService.writeFullMetadata(clusterState, randomAlphaOfLength(10)) ); } @@ -383,7 +456,7 @@ public void testTimeoutWhileWritingManifestFile() throws IOException { try { remoteClusterStateService.writeFullMetadata(clusterState, randomAlphaOfLength(10)); } catch (Exception e) { - assertTrue(e instanceof RemoteClusterStateService.RemoteStateTransferException); + assertTrue(e instanceof RemoteStateTransferException); assertTrue(e.getMessage().contains("Timed out waiting for transfer of following metadata to complete")); } } @@ -404,7 +477,7 @@ public void testWriteFullMetadataInParallelFailureForIndexMetadata() throws IOEx remoteClusterStateService.start(); assertThrows( - RemoteClusterStateService.RemoteStateTransferException.class, + RemoteStateTransferException.class, () -> remoteClusterStateService.writeFullMetadata(clusterState, randomAlphaOfLength(10)) ); assertEquals(0, remoteClusterStateService.getStats().getSuccessCount()); @@ -450,7 +523,7 @@ public void testWriteIncrementalMetadataSuccess() throws IOException { clusterState, previousManifest ).getClusterMetadataManifest(); - final UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "index-uuid", "metadata-filename"); + final UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "index-uuid", "metadata-filename__2"); final List indices = List.of(uploadedIndexMetadata); final ClusterMetadataManifest expectedManifest = ClusterMetadataManifest.builder() @@ -577,7 
+650,7 @@ private void verifyWriteIncrementalGlobalMetadataFromOlderCodecSuccess(ClusterMe assertNotNull(manifest.getCoordinationMetadata()); assertNotNull(manifest.getSettingsMetadata()); assertNotNull(manifest.getTemplatesMetadata()); - assertNotEquals(0, manifest.getCustomMetadataMap().size()); + assertNotNull(manifest.getCustomMetadataMap()); assertEquals(expectedManifest.getClusterTerm(), manifest.getClusterTerm()); assertEquals(expectedManifest.getStateVersion(), manifest.getStateVersion()); @@ -591,6 +664,7 @@ public void testCoordinationMetadataOnlyUpdated() throws IOException { Function updater = (initialClusterState) -> ClusterState.builder(initialClusterState) .metadata( Metadata.builder(initialClusterState.metadata()) + .version(initialClusterState.metadata().version() + 1) .coordinationMetadata( CoordinationMetadata.builder(initialClusterState.coordinationMetadata()) .addVotingConfigExclusion(new CoordinationMetadata.VotingConfigExclusion("excludedNodeId", "excludedNodeName")) @@ -761,6 +835,7 @@ public void testCustomMetadataDeletedUpdatedAndAdded() throws IOException { .putCustom("custom1", new CustomMetadata1("mock_custom_metadata1")) .putCustom("custom2", new CustomMetadata1("mock_custom_metadata2")) .putCustom("custom3", new CustomMetadata1("mock_custom_metadata3")) + .version(initialClusterState.metadata().version() + 1) ) .build(); @@ -776,6 +851,7 @@ public void testCustomMetadataDeletedUpdatedAndAdded() throws IOException { .putCustom("custom2", new CustomMetadata1("mock_updated_custom_metadata")) .putCustom("custom3", new CustomMetadata1("mock_custom_metadata3")) .putCustom("custom4", new CustomMetadata1("mock_custom_metadata4")) + .version(clusterState1.metadata().version() + 1) ) .build(); ClusterMetadataManifest manifest2 = remoteClusterStateService.writeIncrementalMetadata(clusterState1, clusterState2, manifest1) @@ -929,24 +1005,6 @@ private void verifyMetadataAttributeOnlyUpdated( assertions.accept(initialManifest, manifestAfterMetadataUpdate); } - public void testReadLatestMetadataManifestFailedIOException() throws IOException { - final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); - - BlobContainer blobContainer = mockBlobStoreObjects(); - when(blobContainer.listBlobsByPrefixInSortedOrder("manifest" + DELIMITER, 1, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC)) - .thenThrow(IOException.class); - - remoteClusterStateService.start(); - Exception e = assertThrows( - IllegalStateException.class, - () -> remoteClusterStateService.getLatestClusterMetadataManifest( - clusterState.getClusterName().value(), - clusterState.metadata().clusterUUID() - ) - ); - assertEquals(e.getMessage(), "Error while fetching latest manifest file for remote cluster state"); - } - public void testReadLatestMetadataManifestFailedNoManifestFileInRemote() throws IOException { final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); @@ -1001,17 +1059,18 @@ public void testReadLatestMetadataManifestSuccessButNoIndexMetadata() throws IOE remoteClusterStateService.start(); assertEquals( - remoteClusterStateService.getLatestClusterState(clusterState.getClusterName().value(), clusterState.metadata().clusterUUID()) - .getMetadata() - .getIndices() - .size(), + remoteClusterStateService.getLatestClusterState( + clusterState.getClusterName().value(), + clusterState.metadata().clusterUUID(), + false + ).getMetadata().getIndices().size(), 0 ); } public void 
testReadLatestMetadataManifestSuccessButIndexMetadataFetchIOException() throws IOException { final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); - final UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "index-uuid", "metadata-filename"); + final UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "index-uuid", "metadata-filename__2"); final List indices = List.of(uploadedIndexMetadata); final ClusterMetadataManifest expectedManifest = ClusterMetadataManifest.builder() .indices(indices) @@ -1026,17 +1085,18 @@ public void testReadLatestMetadataManifestSuccessButIndexMetadataFetchIOExceptio BlobContainer blobContainer = mockBlobStoreObjects(); mockBlobContainer(blobContainer, expectedManifest, Map.of()); - when(blobContainer.readBlob(uploadedIndexMetadata.getUploadedFilename() + ".dat")).thenThrow(FileNotFoundException.class); + when(blobContainer.readBlob(uploadedIndexMetadata.getUploadedFilename())).thenThrow(FileNotFoundException.class); remoteClusterStateService.start(); Exception e = assertThrows( - IllegalStateException.class, + RemoteStateTransferException.class, () -> remoteClusterStateService.getLatestClusterState( clusterState.getClusterName().value(), - clusterState.metadata().clusterUUID() + clusterState.metadata().clusterUUID(), + false ).getMetadata().getIndices() ); - assertEquals(e.getMessage(), "Error while downloading IndexMetadata - " + uploadedIndexMetadata.getUploadedFilename()); + assertEquals("Exception during reading cluster state from remote", e.getMessage()); } public void testReadLatestMetadataManifestSuccess() throws IOException { @@ -1087,10 +1147,10 @@ public void testReadGlobalMetadata() throws IOException { .stateUUID("state-uuid") .clusterUUID("cluster-uuid") .codecVersion(MANIFEST_CURRENT_CODEC_VERSION) - .coordinationMetadata(new UploadedMetadataAttribute(COORDINATION_METADATA, "mock-coordination-file")) - .settingMetadata(new UploadedMetadataAttribute(SETTING_METADATA, "mock-setting-file")) - .templatesMetadata(new UploadedMetadataAttribute(TEMPLATES_METADATA, "mock-templates-file")) - .put(IndexGraveyard.TYPE, new UploadedMetadataAttribute(IndexGraveyard.TYPE, "mock-custom-" +IndexGraveyard.TYPE+ "-file")) + .coordinationMetadata(new ClusterMetadataManifest.UploadedMetadataAttribute(COORDINATION_METADATA, "mock-coordination-file")) + .settingMetadata(new ClusterMetadataManifest.UploadedMetadataAttribute(SETTING_METADATA, "mock-setting-file")) + .templatesMetadata(new ClusterMetadataManifest.UploadedMetadataAttribute(TEMPLATES_METADATA, "mock-templates-file")) + .put(IndexGraveyard.TYPE, new ClusterMetadataManifest.UploadedMetadataAttribute(IndexGraveyard.TYPE, "mock-custom-" +IndexGraveyard.TYPE+ "-file")) .nodeId("nodeA") .opensearchVersion(VersionUtils.randomOpenSearchVersion(random())) .previousClusterUUID("prev-cluster-uuid") @@ -1098,15 +1158,16 @@ public void testReadGlobalMetadata() throws IOException { .indicesRouting(List.of()) .build(); - Metadata expactedMetadata = Metadata.builder().persistentSettings(Settings.builder().put("readonly", true).build()).build(); - mockBlobContainerForGlobalMetadata(mockBlobStoreObjects(), expectedManifest, expactedMetadata); + Metadata expectedMetadata = Metadata.builder().clusterUUID("cluster-uuid").persistentSettings(Settings.builder().put("readonly", true).build()).build(); + mockBlobContainerForGlobalMetadata(mockBlobStoreObjects(), expectedManifest, expectedMetadata); - 
ClusterState newClusterState = remoteClusterStateService.getLatestClusterState( + ClusterState newClusterState = remoteClusterStateService.getLatestClusterState( clusterState.getClusterName().value(), - clusterState.metadata().clusterUUID() + clusterState.metadata().clusterUUID(), + false ); - assertTrue(Metadata.isGlobalStateEquals(newClusterState.getMetadata(), expactedMetadata)); + assertTrue(Metadata.isGlobalStateEquals(newClusterState.getMetadata(), expectedMetadata)); long newClusterStateVersion = newClusterState.getVersion(); assert prevClusterStateVersion == newClusterStateVersion : String.format( @@ -1139,16 +1200,15 @@ public void testReadGlobalMetadataIOException() throws IOException { BlobContainer blobContainer = mockBlobStoreObjects(); mockBlobContainerForGlobalMetadata(blobContainer, expectedManifest, expactedMetadata); - when(blobContainer.readBlob(RemoteClusterStateService.GLOBAL_METADATA_FORMAT.blobName(globalIndexMetadataName))).thenThrow( - FileNotFoundException.class - ); + when(blobContainer.readBlob(GLOBAL_METADATA_FORMAT.blobName(globalIndexMetadataName))).thenThrow(FileNotFoundException.class); remoteClusterStateService.start(); Exception e = assertThrows( IllegalStateException.class, () -> remoteClusterStateService.getLatestClusterState( clusterState.getClusterName().value(), - clusterState.metadata().clusterUUID() + clusterState.metadata().clusterUUID(), + false ) ); assertEquals(e.getMessage(), "Error while downloading Global Metadata - " + globalIndexMetadataName); @@ -1159,7 +1219,7 @@ public void testReadLatestIndexMetadataSuccess() throws IOException { remoteClusterStateService.start(); final Index index = new Index("test-index", "index-uuid"); - String fileName = "metadata-" + index.getUUID(); + String fileName = "metadata-" + index.getUUID() + "__1"; final UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata(index.getName(), index.getUUID(), fileName); final Settings idxSettings = Settings.builder() .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) @@ -1187,7 +1247,8 @@ public void testReadLatestIndexMetadataSuccess() throws IOException { Map indexMetadataMap = remoteClusterStateService.getLatestClusterState( clusterState.getClusterName().value(), - clusterState.metadata().clusterUUID() + clusterState.metadata().clusterUUID(), + false ).getMetadata().getIndices(); assertEquals(indexMetadataMap.size(), 1); @@ -1319,96 +1380,12 @@ public void testRemoteStateStats() throws IOException { assertEquals(0, remoteClusterStateService.getStats().getFailedCount()); } - public void testFileNames() { - final Index index = new Index("test-index", "index-uuid"); - final Settings idxSettings = Settings.builder() - .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) - .put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()) - .build(); - final IndexMetadata indexMetadata = new IndexMetadata.Builder(index.getName()).settings(idxSettings) - .numberOfShards(1) - .numberOfReplicas(0) - .build(); - - String indexMetadataFileName = RemoteClusterStateService.indexMetadataFileName(indexMetadata); - String[] splittedIndexMetadataFileName = indexMetadataFileName.split(DELIMITER); - assertThat(indexMetadataFileName.split(DELIMITER).length, is(4)); - assertThat(splittedIndexMetadataFileName[0], is(METADATA_FILE_PREFIX)); - assertThat(splittedIndexMetadataFileName[1], is(RemoteStoreUtils.invertLong(indexMetadata.getVersion()))); - assertThat(splittedIndexMetadataFileName[3], is(String.valueOf(INDEX_METADATA_CURRENT_CODEC_VERSION))); - - 
verifyManifestFileNameWithCodec(MANIFEST_CURRENT_CODEC_VERSION); - verifyManifestFileNameWithCodec(CODEC_V1); - verifyManifestFileNameWithCodec(ClusterMetadataManifest.CODEC_V0); - } - - private void verifyManifestFileNameWithCodec(int codecVersion) { - int term = randomIntBetween(5, 10); - int version = randomIntBetween(5, 10); - String manifestFileName = RemoteClusterStateService.getManifestFileName(term, version, true, codecVersion); - assertThat(manifestFileName.split(DELIMITER).length, is(6)); - String[] splittedName = manifestFileName.split(DELIMITER); - assertThat(splittedName[0], is(MANIFEST_FILE_PREFIX)); - assertThat(splittedName[1], is(RemoteStoreUtils.invertLong(term))); - assertThat(splittedName[2], is(RemoteStoreUtils.invertLong(version))); - assertThat(splittedName[3], is("C")); - assertThat(splittedName[5], is(String.valueOf(codecVersion))); - - manifestFileName = RemoteClusterStateService.getManifestFileName(term, version, false, codecVersion); - splittedName = manifestFileName.split(DELIMITER); - assertThat(splittedName[3], is("P")); - } - - public void testIndexMetadataUploadWaitTimeSetting() { - // verify default value - assertEquals( - RemoteClusterStateService.INDEX_METADATA_UPLOAD_TIMEOUT_DEFAULT, - remoteClusterStateService.getIndexMetadataUploadTimeout() - ); - - // verify update index metadata upload timeout - int indexMetadataUploadTimeout = randomIntBetween(1, 10); - Settings newSettings = Settings.builder() - .put("cluster.remote_store.state.index_metadata.upload_timeout", indexMetadataUploadTimeout + "s") - .build(); - clusterSettings.applySettings(newSettings); - assertEquals(indexMetadataUploadTimeout, remoteClusterStateService.getIndexMetadataUploadTimeout().seconds()); - } - - public void testMetadataManifestUploadWaitTimeSetting() { - // verify default value - assertEquals( - RemoteClusterStateService.METADATA_MANIFEST_UPLOAD_TIMEOUT_DEFAULT, - remoteClusterStateService.getMetadataManifestUploadTimeout() - ); - - // verify update metadata manifest upload timeout - int metadataManifestUploadTimeout = randomIntBetween(1, 10); - Settings newSettings = Settings.builder() - .put("cluster.remote_store.state.metadata_manifest.upload_timeout", metadataManifestUploadTimeout + "s") - .build(); - clusterSettings.applySettings(newSettings); - assertEquals(metadataManifestUploadTimeout, remoteClusterStateService.getMetadataManifestUploadTimeout().seconds()); - } - - public void testGlobalMetadataUploadWaitTimeSetting() { - // verify default value - assertEquals( - RemoteClusterStateService.GLOBAL_METADATA_UPLOAD_TIMEOUT_DEFAULT, - remoteClusterStateService.getGlobalMetadataUploadTimeout() - ); - - // verify update global metadata upload timeout - int globalMetadataUploadTimeout = randomIntBetween(1, 10); - Settings newSettings = Settings.builder() - .put("cluster.remote_store.state.global_metadata.upload_timeout", globalMetadataUploadTimeout + "s") - .build(); - clusterSettings.applySettings(newSettings); - assertEquals(globalMetadataUploadTimeout, remoteClusterStateService.getGlobalMetadataUploadTimeout().seconds()); - } - public void testRemoteRoutingTableNotInitializedWhenDisabled() { - assertTrue(remoteClusterStateService.getRemoteRoutingTableService() instanceof NoopRemoteRoutingTableService); + if (publicationEnabled) { + assertTrue(remoteClusterStateService.getRemoteRoutingTableService() instanceof InternalRemoteRoutingTableService); + } else { + assertTrue(remoteClusterStateService.getRemoteRoutingTableService() instanceof NoopRemoteRoutingTableService); + } } 
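// Editorial note, not part of the original patch: the removed testFileNames() above encoded
// the manifest blob-name layout, which RemoteClusterMetadataManifestTests now covers. As a
// hedged sketch (assuming RemoteStoreUtils.invertLong(n) yields the zero-padded decimal of
// Long.MAX_VALUE - n, and timestampMillis stands in for the upload time), a committed
// manifest name for term=1, version=2, codec=2 would look roughly like:
//
//     String name = String.join(DELIMITER,               // DELIMITER is "__"
//         "manifest",
//         RemoteStoreUtils.invertLong(1L),               // inverted cluster term
//         RemoteStoreUtils.invertLong(2L),               // inverted state version
//         "C",                                           // "C" = committed, "P" = published
//         RemoteStoreUtils.invertLong(timestampMillis),  // inverted upload timestamp
//         "2");                                          // manifest codec version
//
// Inverting the longs makes a lexicographically sorted listing return the newest manifest
// first, which is why listBlobsByPrefixInSortedOrder("manifest" + DELIMITER, 1, LEXICOGRAPHIC)
// is sufficient to locate the latest manifest in the mocks throughout these tests.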
public void testRemoteRoutingTableInitializedWhenEnabled() { @@ -1429,7 +1406,8 @@ public void testRemoteRoutingTableInitializedWhenEnabled() { clusterService, () -> 0L, threadPool, - List.of(new RemoteIndexPathUploader(threadPool, newSettings, repositoriesServiceSupplier, clusterSettings)) + List.of(new RemoteIndexPathUploader(threadPool, newSettings, repositoriesServiceSupplier, clusterSettings)), + writableRegistry() ); assertTrue(remoteClusterStateService.getRemoteRoutingTableService() instanceof InternalRemoteRoutingTableService); } @@ -1527,8 +1505,8 @@ public void testWriteFullMetadataInParallelSuccessWithRoutingTable() throws IOEx assertThat(manifest.getIndicesRouting().get(0).getIndexUUID(), is(uploadedIndiceRoutingMetadata.getIndexUUID())); assertThat(manifest.getIndicesRouting().get(0).getUploadedFilename(), notNullValue()); - assertEquals(8, actionListenerArgumentCaptor.getAllValues().size()); - assertEquals(8, writeContextArgumentCaptor.getAllValues().size()); + assertEquals(12, actionListenerArgumentCaptor.getAllValues().size()); + assertEquals(12, writeContextArgumentCaptor.getAllValues().size()); } public void testWriteIncrementalMetadataSuccessWithRoutingTable() throws IOException { @@ -1595,7 +1573,8 @@ private void initializeRoutingTable() { clusterService, () -> 0L, threadPool, - List.of(new RemoteIndexPathUploader(threadPool, newSettings, repositoriesServiceSupplier, clusterSettings)) + List.of(new RemoteIndexPathUploader(threadPool, newSettings, repositoriesServiceSupplier, clusterSettings)), + writableRegistry() ); } @@ -1629,8 +1608,8 @@ private void mockObjectsForGettingPreviousClusterUUID( mockBlobContainerForClusterUUIDs(uuidBlobContainer, clusterUUIDsPointers.keySet()); List uploadedIndexMetadataList1 = List.of( - new UploadedIndexMetadata("index1", "index-uuid1", "key1"), - new UploadedIndexMetadata("index2", "index-uuid2", "key2") + new UploadedIndexMetadata("index1", "index-uuid1", "key1__2"), + new UploadedIndexMetadata("index2", "index-uuid2", "key2__2") ); Map customMetadataMap = new HashMap<>(); final ClusterMetadataManifest clusterManifest1 = generateClusterMetadataManifest( @@ -1660,11 +1639,11 @@ private void mockObjectsForGettingPreviousClusterUUID( .build(); Map indexMetadataMap1 = Map.of("index-uuid1", indexMetadata1, "index-uuid2", indexMetadata2); mockBlobContainerForGlobalMetadata(blobContainer1, clusterManifest1, metadata1); - mockBlobContainer(blobContainer1, clusterManifest1, indexMetadataMap1, ClusterMetadataManifest.CODEC_V2); + mockBlobContainer(blobContainer1, clusterManifest1, indexMetadataMap1, MANIFEST_CURRENT_CODEC_VERSION); List uploadedIndexMetadataList2 = List.of( - new UploadedIndexMetadata("index1", "index-uuid1", "key1"), - new UploadedIndexMetadata("index2", "index-uuid2", "key2") + new UploadedIndexMetadata("index1", "index-uuid1", "key1__2"), + new UploadedIndexMetadata("index2", "index-uuid2", "key2__2") ); final ClusterMetadataManifest clusterManifest2 = generateClusterMetadataManifest( "cluster-uuid2", @@ -1692,7 +1671,7 @@ private void mockObjectsForGettingPreviousClusterUUID( .build(); Map indexMetadataMap2 = Map.of("index-uuid1", indexMetadata3, "index-uuid2", indexMetadata4); mockBlobContainerForGlobalMetadata(blobContainer2, clusterManifest2, metadata2); - mockBlobContainer(blobContainer2, clusterManifest2, indexMetadataMap2, ClusterMetadataManifest.CODEC_V2); + mockBlobContainer(blobContainer2, clusterManifest2, indexMetadataMap2, MANIFEST_CURRENT_CODEC_VERSION); // differGlobalMetadata controls which one of 
IndexMetadata or Metadata object would be different // when comparing cluster-uuid3 and cluster-uuid1 state. @@ -1702,7 +1681,7 @@ private void mockObjectsForGettingPreviousClusterUUID( // IndexMetadata and Metadata when deciding if the remote state b/w two different cluster uuids is same. List uploadedIndexMetadataList3 = differGlobalMetadata ? new ArrayList<>(uploadedIndexMetadataList1) - : List.of(new UploadedIndexMetadata("index1", "index-uuid1", "key1")); + : List.of(new UploadedIndexMetadata("index1", "index-uuid1", "key1__2")); IndexMetadata indexMetadata5 = IndexMetadata.builder("index1") .settings(indexSettings) .numberOfShards(1) @@ -1726,7 +1705,7 @@ private void mockObjectsForGettingPreviousClusterUUID( clusterUUIDCommitted.getOrDefault("cluster-uuid3", true) ); mockBlobContainerForGlobalMetadata(blobContainer3, clusterManifest3, metadata3); - mockBlobContainer(blobContainer3, clusterManifest3, indexMetadataMap3, ClusterMetadataManifest.CODEC_V2); + mockBlobContainer(blobContainer3, clusterManifest3, indexMetadataMap3, MANIFEST_CURRENT_CODEC_VERSION); ArrayList mockBlobContainerOrderedList = new ArrayList<>( List.of(blobContainer1, blobContainer1, blobContainer3, blobContainer3, blobContainer2, blobContainer2) @@ -1734,11 +1713,33 @@ private void mockObjectsForGettingPreviousClusterUUID( if (differGlobalMetadata) { mockBlobContainerOrderedList.addAll( - List.of(blobContainer3, blobContainer1, blobContainer3, blobContainer1, blobContainer1, blobContainer3) + List.of( + blobContainer3, + blobContainer1, + blobContainer3, + blobContainer1, + blobContainer1, + blobContainer1, + blobContainer1, + blobContainer3, + blobContainer3, + blobContainer3 + ) ); } mockBlobContainerOrderedList.addAll( - List.of(blobContainer2, blobContainer1, blobContainer2, blobContainer1, blobContainer1, blobContainer2) + List.of( + blobContainer2, + blobContainer1, + blobContainer2, + blobContainer1, + blobContainer1, + blobContainer1, + blobContainer1, + blobContainer2, + blobContainer2, + blobContainer2 + ) ); BlobContainer[] mockBlobContainerOrderedArray = new BlobContainer[mockBlobContainerOrderedList.size()]; mockBlobContainerOrderedList.toArray(mockBlobContainerOrderedArray); @@ -1808,6 +1809,17 @@ private BlobContainer mockBlobStoreObjects(Class blobCo final BlobPath blobPath = mock(BlobPath.class); when((blobStoreRepository.basePath())).thenReturn(blobPath); when(blobPath.add(anyString())).thenReturn(blobPath); + when(blobPath.iterator()).thenReturn(new Iterator() { + @Override + public boolean hasNext() { + return false; + } + + @Override + public String next() { + return null; + } + }); when(blobPath.buildAsString()).thenReturn("/blob/path/"); final BlobContainer blobContainer = mock(blobContainerClazz); when(blobContainer.path()).thenReturn(blobPath); @@ -1845,7 +1857,7 @@ private void mockBlobContainer( when(blobContainer.listBlobsByPrefixInSortedOrder("manifest" + DELIMITER, 1, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC)) .thenReturn(Arrays.asList(blobMetadata)); - BytesReference bytes = RemoteClusterStateService.CLUSTER_METADATA_MANIFEST_FORMAT.serialize( + BytesReference bytes = RemoteClusterMetadataManifest.CLUSTER_METADATA_MANIFEST_FORMAT.serialize( clusterMetadataManifest, manifestFileName, blobStoreRepository.getCompressor(), @@ -1860,8 +1872,8 @@ private void mockBlobContainer( return; } String fileName = uploadedIndexMetadata.getUploadedFilename(); - when(blobContainer.readBlob(fileName + ".dat")).thenAnswer((invocationOnMock) -> { - BytesReference bytesIndexMetadata = 
RemoteClusterStateService.INDEX_METADATA_FORMAT.serialize( + when(blobContainer.readBlob(getFormattedIndexFileName(fileName))).thenAnswer((invocationOnMock) -> { + BytesReference bytesIndexMetadata = RemoteIndexMetadata.INDEX_METADATA_FORMAT.serialize( indexMetadata, fileName, blobStoreRepository.getCompressor(), @@ -1883,15 +1895,10 @@ private void mockBlobContainerForGlobalMetadata( int codecVersion = clusterMetadataManifest.getCodecVersion(); String mockManifestFileName = "manifest__1__2__C__456__" + codecVersion; BlobMetadata blobMetadata = new PlainBlobMetadata(mockManifestFileName, 1); - when( - blobContainer.listBlobsByPrefixInSortedOrder( - "manifest" + RemoteClusterStateService.DELIMITER, - 1, - BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC - ) - ).thenReturn(Arrays.asList(blobMetadata)); + when(blobContainer.listBlobsByPrefixInSortedOrder("manifest" + DELIMITER, 1, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC)) + .thenReturn(Arrays.asList(blobMetadata)); - BytesReference bytes = RemoteClusterStateService.CLUSTER_METADATA_MANIFEST_FORMAT.serialize( + BytesReference bytes = RemoteClusterMetadataManifest.CLUSTER_METADATA_MANIFEST_FORMAT.serialize( clusterMetadataManifest, mockManifestFileName, blobStoreRepository.getCompressor(), @@ -1900,76 +1907,75 @@ private void mockBlobContainerForGlobalMetadata( when(blobContainer.readBlob(mockManifestFileName)).thenReturn(new ByteArrayInputStream(bytes.streamInput().readAllBytes())); if (codecVersion >= ClusterMetadataManifest.CODEC_V2) { String coordinationFileName = getFileNameFromPath(clusterMetadataManifest.getCoordinationMetadata().getUploadedFilename()); - when(blobContainer.readBlob(RemoteClusterStateService.COORDINATION_METADATA_FORMAT.blobName(coordinationFileName))).thenAnswer( - (invocationOnMock) -> { - BytesReference bytesReference = RemoteClusterStateService.COORDINATION_METADATA_FORMAT.serialize( - metadata.coordinationMetadata(), - coordinationFileName, - blobStoreRepository.getCompressor(), - FORMAT_PARAMS - ); - return new ByteArrayInputStream(bytesReference.streamInput().readAllBytes()); - } - ); + when(blobContainer.readBlob(COORDINATION_METADATA_FORMAT.blobName(coordinationFileName))).thenAnswer((invocationOnMock) -> { + BytesReference bytesReference = COORDINATION_METADATA_FORMAT.serialize( + metadata.coordinationMetadata(), + coordinationFileName, + blobStoreRepository.getCompressor(), + FORMAT_PARAMS + ); + return new ByteArrayInputStream(bytesReference.streamInput().readAllBytes()); + }); String settingsFileName = getFileNameFromPath(clusterMetadataManifest.getSettingsMetadata().getUploadedFilename()); - when(blobContainer.readBlob(RemoteClusterStateService.SETTINGS_METADATA_FORMAT.blobName(settingsFileName))).thenAnswer( - (invocationOnMock) -> { - BytesReference bytesReference = RemoteClusterStateService.SETTINGS_METADATA_FORMAT.serialize( - metadata.persistentSettings(), - settingsFileName, - blobStoreRepository.getCompressor(), - FORMAT_PARAMS - ); - return new ByteArrayInputStream(bytesReference.streamInput().readAllBytes()); - } - ); + when(blobContainer.readBlob(SETTINGS_METADATA_FORMAT.blobName(settingsFileName))).thenAnswer((invocationOnMock) -> { + BytesReference bytesReference = SETTINGS_METADATA_FORMAT.serialize( + metadata.persistentSettings(), + settingsFileName, + blobStoreRepository.getCompressor(), + FORMAT_PARAMS + ); + return new ByteArrayInputStream(bytesReference.streamInput().readAllBytes()); + }); String templatesFileName = 
getFileNameFromPath(clusterMetadataManifest.getTemplatesMetadata().getUploadedFilename()); - when(blobContainer.readBlob(RemoteClusterStateService.TEMPLATES_METADATA_FORMAT.blobName(templatesFileName))).thenAnswer( - (invocationOnMock) -> { - BytesReference bytesReference = RemoteClusterStateService.TEMPLATES_METADATA_FORMAT.serialize( - metadata.templatesMetadata(), - templatesFileName, - blobStoreRepository.getCompressor(), - FORMAT_PARAMS - ); - return new ByteArrayInputStream(bytesReference.streamInput().readAllBytes()); - } - ); + when(blobContainer.readBlob(TEMPLATES_METADATA_FORMAT.blobName(templatesFileName))).thenAnswer((invocationOnMock) -> { + BytesReference bytesReference = TEMPLATES_METADATA_FORMAT.serialize( + metadata.templatesMetadata(), + templatesFileName, + blobStoreRepository.getCompressor(), + FORMAT_PARAMS + ); + return new ByteArrayInputStream(bytesReference.streamInput().readAllBytes()); + }); Map customFileMap = clusterMetadataManifest.getCustomMetadataMap() .entrySet() .stream() .collect(Collectors.toMap(Map.Entry::getKey, entry -> getFileNameFromPath(entry.getValue().getUploadedFilename()))); + // ChecksumBlobStoreFormat customMetadataFormat = new ChecksumBlobStoreFormat<>( + // "custom", + // METADATA_NAME_PLAIN_FORMAT, + // null + // ); + + ChecksumWritableBlobStoreFormat customMetadataFormat = new ChecksumWritableBlobStoreFormat<>("custom", null); for (Map.Entry entry : customFileMap.entrySet()) { String custom = entry.getKey(); String fileName = entry.getValue(); - when(blobContainer.readBlob(RemoteClusterStateService.CUSTOM_METADATA_FORMAT.blobName(fileName))).thenAnswer( - (invocation) -> { - BytesReference bytesReference = RemoteClusterStateService.CUSTOM_METADATA_FORMAT.serialize( - metadata.custom(custom), - fileName, - blobStoreRepository.getCompressor(), - FORMAT_PARAMS - ); - return new ByteArrayInputStream(bytesReference.streamInput().readAllBytes()); - } - ); + when(blobContainer.readBlob(fileName)).thenAnswer((invocation) -> { + BytesReference bytesReference = customMetadataFormat.serialize( + metadata.custom(custom), + fileName, + blobStoreRepository.getCompressor() + ); + return new ByteArrayInputStream(bytesReference.streamInput().readAllBytes()); + }); } } else if (codecVersion == CODEC_V1) { String[] splitPath = clusterMetadataManifest.getGlobalMetadataFileName().split("/"); - when(blobContainer.readBlob(RemoteClusterStateService.GLOBAL_METADATA_FORMAT.blobName(splitPath[splitPath.length - 1]))) - .thenAnswer((invocationOnMock) -> { - BytesReference bytesGlobalMetadata = RemoteClusterStateService.GLOBAL_METADATA_FORMAT.serialize( + when(blobContainer.readBlob(GLOBAL_METADATA_FORMAT.blobName(splitPath[splitPath.length - 1]))).thenAnswer( + (invocationOnMock) -> { + BytesReference bytesGlobalMetadata = GLOBAL_METADATA_FORMAT.serialize( metadata, "global-metadata-file", blobStoreRepository.getCompressor(), FORMAT_PARAMS ); return new ByteArrayInputStream(bytesGlobalMetadata.streamInput().readAllBytes()); - }); + } + ); } } @@ -1978,7 +1984,7 @@ private String getFileNameFromPath(String filePath) { return splitPath[splitPath.length - 1]; } - private static ClusterState.Builder generateClusterStateWithGlobalMetadata() { + static ClusterState.Builder generateClusterStateWithGlobalMetadata() { final Settings clusterSettings = Settings.builder().put("cluster.blocks.read_only", true).build(); final CoordinationMetadata coordinationMetadata = CoordinationMetadata.builder().term(1L).build(); diff --git 
a/server/src/test/java/org/opensearch/gateway/remote/RemoteGlobalMetadataManagerTests.java b/server/src/test/java/org/opensearch/gateway/remote/RemoteGlobalMetadataManagerTests.java new file mode 100644 index 0000000000000..bd01bc1ab0cdb --- /dev/null +++ b/server/src/test/java/org/opensearch/gateway/remote/RemoteGlobalMetadataManagerTests.java @@ -0,0 +1,295 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.gateway.remote; + +import org.opensearch.Version; +import org.opensearch.cluster.ClusterModule; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.DiffableUtils; +import org.opensearch.cluster.metadata.IndexGraveyard; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.metadata.Metadata.XContentContext; +import org.opensearch.common.network.NetworkModule; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.compress.Compressor; +import org.opensearch.core.compress.NoneCompressor; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.index.translog.transfer.BlobStoreTransferService; +import org.opensearch.indices.IndicesModule; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.TestCustomMetadata; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; + +import java.util.EnumSet; +import java.util.List; +import java.util.Map; +import java.util.function.Function; +import java.util.stream.Stream; + +import static java.util.stream.Collectors.toList; +import static org.hamcrest.Matchers.is; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class RemoteGlobalMetadataManagerTests extends OpenSearchTestCase { + private RemoteGlobalMetadataManager remoteGlobalMetadataManager; + private ClusterSettings clusterSettings; + private BlobStoreRepository blobStoreRepository; + private final ThreadPool threadPool = new TestThreadPool(getClass().getName()); + + @Before + public void setup() { + clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + blobStoreRepository = mock(BlobStoreRepository.class); + BlobStoreTransferService blobStoreTransferService = mock(BlobStoreTransferService.class); + NamedXContentRegistry xContentRegistry = new NamedXContentRegistry( + Stream.of( + NetworkModule.getNamedXContents().stream(), + IndicesModule.getNamedXContents().stream(), + ClusterModule.getNamedXWriteables().stream() + ).flatMap(Function.identity()).collect(toList()) + ); + Compressor compressor = new NoneCompressor(); + when(blobStoreRepository.getCompressor()).thenReturn(compressor); + when(blobStoreRepository.getNamedXContentRegistry()).thenReturn(xContentRegistry); + remoteGlobalMetadataManager = new RemoteGlobalMetadataManager( + clusterSettings, + "test-cluster", + blobStoreRepository, + blobStoreTransferService, + writableRegistry(), + threadPool + ); + } + + @After + public void tearDown() throws Exception { + super.tearDown(); + threadPool.shutdown(); + } + + public void testGlobalMetadataUploadWaitTimeSetting() { + // verify default value + 
assertEquals( + RemoteGlobalMetadataManager.GLOBAL_METADATA_UPLOAD_TIMEOUT_DEFAULT, + remoteGlobalMetadataManager.getGlobalMetadataUploadTimeout() + ); + + // verify update global metadata upload timeout + int globalMetadataUploadTimeout = randomIntBetween(1, 10); + Settings newSettings = Settings.builder() + .put("cluster.remote_store.state.global_metadata.upload_timeout", globalMetadataUploadTimeout + "s") + .build(); + clusterSettings.applySettings(newSettings); + assertEquals(globalMetadataUploadTimeout, remoteGlobalMetadataManager.getGlobalMetadataUploadTimeout().seconds()); + } + + public void testGetUpdatedCustoms() { + Map<String, Metadata.Custom> previousCustoms = Map.of( + CustomMetadata1.TYPE, + new CustomMetadata1("data1"), + CustomMetadata2.TYPE, + new CustomMetadata2("data2"), + CustomMetadata3.TYPE, + new CustomMetadata3("data3") + ); + ClusterState previousState = ClusterState.builder(new ClusterName("test-cluster")) + .metadata(Metadata.builder().customs(previousCustoms)) + .build(); + + Map<String, Metadata.Custom> currentCustoms = Map.of( + CustomMetadata2.TYPE, + new CustomMetadata2("data2"), + CustomMetadata3.TYPE, + new CustomMetadata3("data3-changed"), + CustomMetadata4.TYPE, + new CustomMetadata4("data4"), + CustomMetadata5.TYPE, + new CustomMetadata5("data5") + ); + ClusterState currentState = ClusterState.builder(new ClusterName("test-cluster")) + .metadata(Metadata.builder().customs(currentCustoms)) + .build(); + + DiffableUtils.MapDiff<String, Metadata.Custom, Map<String, Metadata.Custom>> customsDiff = remoteGlobalMetadataManager + .getCustomsDiff(currentState, previousState, true, false); + Map<String, Metadata.Custom> expectedUpserts = Map.of( + CustomMetadata2.TYPE, + new CustomMetadata2("data2"), + CustomMetadata3.TYPE, + new CustomMetadata3("data3-changed"), + CustomMetadata4.TYPE, + new CustomMetadata4("data4"), + IndexGraveyard.TYPE, + IndexGraveyard.builder().build() + ); + assertThat(customsDiff.getUpserts(), is(expectedUpserts)); + assertThat(customsDiff.getDeletes(), is(List.of())); + + customsDiff = remoteGlobalMetadataManager.getCustomsDiff(currentState, previousState, false, false); + expectedUpserts = Map.of( + CustomMetadata3.TYPE, + new CustomMetadata3("data3-changed"), + CustomMetadata4.TYPE, + new CustomMetadata4("data4") + ); + assertThat(customsDiff.getUpserts(), is(expectedUpserts)); + assertThat(customsDiff.getDeletes(), is(List.of(CustomMetadata1.TYPE))); + + customsDiff = remoteGlobalMetadataManager.getCustomsDiff(currentState, previousState, true, true); + expectedUpserts = Map.of( + CustomMetadata2.TYPE, + new CustomMetadata2("data2"), + CustomMetadata3.TYPE, + new CustomMetadata3("data3-changed"), + CustomMetadata4.TYPE, + new CustomMetadata4("data4"), + CustomMetadata5.TYPE, + new CustomMetadata5("data5"), + IndexGraveyard.TYPE, + IndexGraveyard.builder().build() + ); + assertThat(customsDiff.getUpserts(), is(expectedUpserts)); + assertThat(customsDiff.getDeletes(), is(List.of())); + + customsDiff = remoteGlobalMetadataManager.getCustomsDiff(currentState, previousState, false, true); + expectedUpserts = Map.of( + CustomMetadata3.TYPE, + new CustomMetadata3("data3-changed"), + CustomMetadata4.TYPE, + new CustomMetadata4("data4"), + CustomMetadata5.TYPE, + new CustomMetadata5("data5") + ); + assertThat(customsDiff.getUpserts(), is(expectedUpserts)); + assertThat(customsDiff.getDeletes(), is(List.of(CustomMetadata1.TYPE))); + + } + + private static class CustomMetadata1 extends TestCustomMetadata { + public static final String TYPE = "custom_md_1"; + + CustomMetadata1(String data) { + super(data); + } + + @Override + public String getWriteableName() { + return
TYPE; + } + + @Override + public Version getMinimalSupportedVersion() { + return Version.CURRENT; + } + + @Override + public EnumSet context() { + return EnumSet.of(Metadata.XContentContext.GATEWAY); + } + } + + private static class CustomMetadata2 extends TestCustomMetadata { + public static final String TYPE = "custom_md_2"; + + CustomMetadata2(String data) { + super(data); + } + + @Override + public String getWriteableName() { + return TYPE; + } + + @Override + public Version getMinimalSupportedVersion() { + return Version.CURRENT; + } + + @Override + public EnumSet context() { + return EnumSet.of(Metadata.XContentContext.GATEWAY); + } + } + + private static class CustomMetadata3 extends TestCustomMetadata { + public static final String TYPE = "custom_md_3"; + + CustomMetadata3(String data) { + super(data); + } + + @Override + public String getWriteableName() { + return TYPE; + } + + @Override + public Version getMinimalSupportedVersion() { + return Version.CURRENT; + } + + @Override + public EnumSet context() { + return EnumSet.of(Metadata.XContentContext.GATEWAY); + } + } + + private static class CustomMetadata4 extends TestCustomMetadata { + public static final String TYPE = "custom_md_4"; + + CustomMetadata4(String data) { + super(data); + } + + @Override + public String getWriteableName() { + return TYPE; + } + + @Override + public Version getMinimalSupportedVersion() { + return Version.CURRENT; + } + + @Override + public EnumSet context() { + return EnumSet.of(Metadata.XContentContext.GATEWAY); + } + } + + private static class CustomMetadata5 extends TestCustomMetadata { + public static final String TYPE = "custom_md_5"; + + CustomMetadata5(String data) { + super(data); + } + + @Override + public String getWriteableName() { + return TYPE; + } + + @Override + public Version getMinimalSupportedVersion() { + return Version.CURRENT; + } + + @Override + public EnumSet context() { + return EnumSet.of(XContentContext.API); + } + } +} diff --git a/server/src/test/java/org/opensearch/gateway/remote/RemoteManifestManagerTests.java b/server/src/test/java/org/opensearch/gateway/remote/RemoteManifestManagerTests.java new file mode 100644 index 0000000000000..055bd94f5317b --- /dev/null +++ b/server/src/test/java/org/opensearch/gateway/remote/RemoteManifestManagerTests.java @@ -0,0 +1,129 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.gateway.remote; + +import org.opensearch.cluster.ClusterModule; +import org.opensearch.cluster.ClusterState; +import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.blobstore.BlobStore; +import org.opensearch.common.network.NetworkModule; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.compress.Compressor; +import org.opensearch.core.compress.NoneCompressor; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.index.translog.transfer.BlobStoreTransferService; +import org.opensearch.indices.IndicesModule; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.function.Function; +import java.util.stream.Stream; + +import static java.util.stream.Collectors.toList; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.DELIMITER; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class RemoteManifestManagerTests extends OpenSearchTestCase { + private RemoteManifestManager remoteManifestManager; + private ClusterSettings clusterSettings; + private BlobStoreRepository blobStoreRepository; + private BlobStore blobStore; + private BlobStoreTransferService blobStoreTransferService; + private ThreadPool threadPool; + + @Before + public void setup() { + clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + blobStoreRepository = mock(BlobStoreRepository.class); + NamedXContentRegistry xContentRegistry = new NamedXContentRegistry( + Stream.of( + NetworkModule.getNamedXContents().stream(), + IndicesModule.getNamedXContents().stream(), + ClusterModule.getNamedXWriteables().stream() + ).flatMap(Function.identity()).collect(toList()) + ); + blobStoreTransferService = mock(BlobStoreTransferService.class); + blobStore = mock(BlobStore.class); + when(blobStoreRepository.blobStore()).thenReturn(blobStore); + threadPool = new TestThreadPool("test"); + Compressor compressor = new NoneCompressor(); + when(blobStoreRepository.getCompressor()).thenReturn(compressor); + when(blobStoreRepository.getNamedXContentRegistry()).thenReturn(xContentRegistry); + remoteManifestManager = new RemoteManifestManager( + clusterSettings, + "test-cluster-name", + "test-node-id", + blobStoreRepository, + blobStoreTransferService, + threadPool + ); + } + + @After + public void tearDown() throws Exception { + super.tearDown(); + threadPool.shutdown(); + } + + public void testMetadataManifestUploadWaitTimeSetting() { + // verify default value + assertEquals( + RemoteManifestManager.METADATA_MANIFEST_UPLOAD_TIMEOUT_DEFAULT, + remoteManifestManager.getMetadataManifestUploadTimeout() + ); + + // verify update metadata manifest upload timeout + int metadataManifestUploadTimeout = randomIntBetween(1, 10); + Settings newSettings = Settings.builder() + .put("cluster.remote_store.state.metadata_manifest.upload_timeout", metadataManifestUploadTimeout + "s") + .build(); + clusterSettings.applySettings(newSettings); + assertEquals(metadataManifestUploadTimeout, 
remoteManifestManager.getMetadataManifestUploadTimeout().seconds()); + } + + public void testReadLatestMetadataManifestFailedIOException() throws IOException { + final ClusterState clusterState = RemoteClusterStateServiceTests.generateClusterStateWithOneIndex() + .nodes(RemoteClusterStateServiceTests.nodesWithLocalNodeClusterManager()) + .build(); + + BlobContainer blobContainer = mockBlobStoreObjects(); + when(blobContainer.listBlobsByPrefixInSortedOrder("manifest" + DELIMITER, 1, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC)) + .thenThrow(IOException.class); + + Exception e = assertThrows( + IllegalStateException.class, + () -> remoteManifestManager.getLatestClusterMetadataManifest( + clusterState.getClusterName().value(), + clusterState.metadata().clusterUUID() + ) + ); + assertEquals(e.getMessage(), "Error while fetching latest manifest file for remote cluster state"); + } + + private BlobContainer mockBlobStoreObjects() { + final BlobPath blobPath = mock(BlobPath.class); + when((blobStoreRepository.basePath())).thenReturn(blobPath); + when(blobPath.add(anyString())).thenReturn(blobPath); + when(blobPath.buildAsString()).thenReturn("/blob/path/"); + final BlobContainer blobContainer = mock(BlobContainer.class); + when(blobContainer.path()).thenReturn(blobPath); + when(blobStore.blobContainer(any())).thenReturn(blobContainer); + return blobContainer; + } +} diff --git a/server/src/test/java/org/opensearch/gateway/remote/model/RemoteClusterBlocksTests.java b/server/src/test/java/org/opensearch/gateway/remote/model/RemoteClusterBlocksTests.java index a5419a8cc8115..3c1e141b81360 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/model/RemoteClusterBlocksTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/model/RemoteClusterBlocksTests.java @@ -136,16 +136,16 @@ public void testSerDe() throws IOException { } } - static ClusterBlocks randomClusterBlocks() { + public static ClusterBlocks randomClusterBlocks() { ClusterBlocks.Builder builder = ClusterBlocks.builder(); - int randomGlobalBlocks = randomIntBetween(0, 10); + int randomGlobalBlocks = randomIntBetween(1, 10); for (int i = 0; i < randomGlobalBlocks; i++) { builder.addGlobalBlock(randomClusterBlock()); } - int randomIndices = randomIntBetween(0, 10); + int randomIndices = randomIntBetween(1, 10); for (int i = 0; i < randomIndices; i++) { - int randomIndexBlocks = randomIntBetween(0, 10); + int randomIndexBlocks = randomIntBetween(1, 10); for (int j = 0; j < randomIndexBlocks; j++) { builder.addIndexBlock("index-" + i, randomClusterBlock()); } diff --git a/server/src/test/java/org/opensearch/gateway/remote/model/RemoteClusterMetadataManifestTests.java b/server/src/test/java/org/opensearch/gateway/remote/model/RemoteClusterMetadataManifestTests.java index 7cb80a1600c03..de1befbecd924 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/model/RemoteClusterMetadataManifestTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/model/RemoteClusterMetadataManifestTests.java @@ -41,6 +41,8 @@ import java.util.stream.Stream; import static java.util.stream.Collectors.toList; +import static org.opensearch.gateway.remote.ClusterMetadataManifest.CODEC_V0; +import static org.opensearch.gateway.remote.ClusterMetadataManifest.CODEC_V2; import static org.opensearch.gateway.remote.model.RemoteClusterMetadataManifest.MANIFEST; import static org.opensearch.gateway.remote.model.RemoteClusterMetadataManifest.MANIFEST_CURRENT_CODEC_VERSION; import static org.hamcrest.Matchers.greaterThan; @@ -236,6 
+238,28 @@ public void testSerDe() throws IOException { assertThrows(IllegalArgumentException.class, () -> invalidRemoteObject.deserialize(new ByteArrayInputStream(new byte[0]))); } + public void testGetManifestCodecVersion() { + String manifestFileWithDelimiterInPath = + "123456789012_test-cluster/cluster-state/dsgYj10__Nkso7/manifest/manifest__9223372036854775806__9223372036854775804__C__9223370319103329556__2"; + RemoteClusterMetadataManifest remoteManifestForDownload = new RemoteClusterMetadataManifest( + manifestFileWithDelimiterInPath, + clusterUUID, + compressor, + namedXContentRegistry + ); + assertEquals(CODEC_V2, remoteManifestForDownload.getManifestCodecVersion()); + + String v0ManifestFileWithDelimiterInPath = + "123456789012_test-cluster/cluster-state/dsgYj10__Nkso7/manifest/manifest__9223372036854775806__9223372036854775804__C__9223370319103329556"; + RemoteClusterMetadataManifest remoteManifestV0ForDownload = new RemoteClusterMetadataManifest( + v0ManifestFileWithDelimiterInPath, + clusterUUID, + compressor, + namedXContentRegistry + ); + assertEquals(CODEC_V0, remoteManifestV0ForDownload.getManifestCodecVersion()); + } + private ClusterMetadataManifest getClusterMetadataManifest() { return ClusterMetadataManifest.builder() .opensearchVersion(Version.CURRENT) diff --git a/server/src/test/java/org/opensearch/gateway/remote/model/RemoteClusterStateCustomsTests.java b/server/src/test/java/org/opensearch/gateway/remote/model/RemoteClusterStateCustomsTests.java index 1f7a5e8bfffb1..1b020e13324a4 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/model/RemoteClusterStateCustomsTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/model/RemoteClusterStateCustomsTests.java @@ -232,12 +232,12 @@ public void testSerDe() throws IOException { try (InputStream inputStream = remoteObjectForUpload.serialize()) { remoteObjectForUpload.setFullBlobName(BlobPath.cleanPath()); assertThat(inputStream.available(), greaterThan(0)); - Custom readclusterStateCustoms = remoteObjectForUpload.deserialize(inputStream); - assertThat(readclusterStateCustoms, is(clusterStateCustoms)); + Custom readClusterStateCustoms = remoteObjectForUpload.deserialize(inputStream); + assertThat(readClusterStateCustoms, is(clusterStateCustoms)); } } - private Custom getClusterStateCustom() { + public static SnapshotsInProgress getClusterStateCustom() { return SnapshotsInProgress.of( List.of( new SnapshotsInProgress.Entry( diff --git a/server/src/test/java/org/opensearch/gateway/remote/model/RemoteCustomMetadataTests.java b/server/src/test/java/org/opensearch/gateway/remote/model/RemoteCustomMetadataTests.java index a0c60ee2088b8..1bce176273270 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/model/RemoteCustomMetadataTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/model/RemoteCustomMetadataTests.java @@ -8,24 +8,21 @@ package org.opensearch.gateway.remote.model; -import org.opensearch.cluster.ClusterModule; import org.opensearch.cluster.metadata.IndexGraveyard; import org.opensearch.cluster.metadata.Metadata.Custom; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.compress.DeflateCompressor; -import org.opensearch.common.network.NetworkModule; import org.opensearch.common.remote.BlobPathParameters; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.compress.Compressor; import 
org.opensearch.core.compress.NoneCompressor; import org.opensearch.core.index.Index; -import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedMetadata; import org.opensearch.gateway.remote.RemoteClusterStateUtils; import org.opensearch.index.remote.RemoteStoreUtils; import org.opensearch.index.translog.transfer.BlobStoreTransferService; -import org.opensearch.indices.IndicesModule; import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; @@ -36,10 +33,7 @@ import java.io.IOException; import java.io.InputStream; import java.util.List; -import java.util.function.Function; -import java.util.stream.Stream; -import static java.util.stream.Collectors.toList; import static org.opensearch.gateway.remote.RemoteClusterStateUtils.GLOBAL_METADATA_CURRENT_CODEC_VERSION; import static org.opensearch.gateway.remote.model.RemoteCustomMetadata.CUSTOM_DELIMITER; import static org.opensearch.gateway.remote.model.RemoteCustomMetadata.CUSTOM_METADATA; @@ -62,7 +56,7 @@ public class RemoteCustomMetadataTests extends OpenSearchTestCase { private String clusterName; private ClusterSettings clusterSettings; private Compressor compressor; - private NamedXContentRegistry namedXContentRegistry; + private NamedWriteableRegistry namedWriteableRegistry; private final ThreadPool threadPool = new TestThreadPool(getClass().getName()); @Before @@ -75,15 +69,7 @@ public void setup() { when(blobStoreRepository.basePath()).thenReturn(blobPath); when(blobStoreRepository.getCompressor()).thenReturn(new DeflateCompressor()); compressor = new NoneCompressor(); - namedXContentRegistry = new NamedXContentRegistry( - Stream.of( - NetworkModule.getNamedXContents().stream(), - IndicesModule.getNamedXContents().stream(), - ClusterModule.getNamedXWriteables().stream() - ).flatMap(Function.identity()).collect(toList()) - ); - // namedXContentRegistry = new NamedXContentRegistry(List.of(new Entry(Metadata.Custom.class, new ParseField(CUSTOM_TYPE), - // p->TestCustomMetadata.fromXContent(CustomMetadata1::new, p)))); + namedWriteableRegistry = writableRegistry(); this.clusterName = "test-cluster-name"; } @@ -101,7 +87,7 @@ public void testClusterUUID() { METADATA_VERSION, clusterUUID, compressor, - namedXContentRegistry + namedWriteableRegistry ); assertThat(remoteObjectForUpload.clusterUUID(), is(clusterUUID)); @@ -110,7 +96,7 @@ public void testClusterUUID() { "test-custom", clusterUUID, compressor, - namedXContentRegistry + namedWriteableRegistry ); assertThat(remoteObjectForDownload.clusterUUID(), is(clusterUUID)); } @@ -123,7 +109,7 @@ public void testFullBlobName() { METADATA_VERSION, clusterUUID, compressor, - namedXContentRegistry + namedWriteableRegistry ); assertThat(remoteObjectForUpload.getFullBlobName(), nullValue()); @@ -132,7 +118,7 @@ public void testFullBlobName() { "test-custom", clusterUUID, compressor, - namedXContentRegistry + namedWriteableRegistry ); assertThat(remoteObjectForDownload.getFullBlobName(), is(TEST_BLOB_NAME)); } @@ -145,7 +131,7 @@ public void testBlobFileName() { METADATA_VERSION, clusterUUID, compressor, - namedXContentRegistry + namedWriteableRegistry ); assertThat(remoteObjectForUpload.getBlobFileName(), nullValue()); @@ -154,7 +140,7 @@ public void testBlobFileName() { "test-custom", clusterUUID, compressor, - namedXContentRegistry + namedWriteableRegistry ); assertThat(remoteObjectForDownload.getBlobFileName(), 
is(TEST_BLOB_FILE_NAME)); } @@ -166,7 +152,7 @@ public void testBlobPathTokens() { "test-custom", clusterUUID, compressor, - namedXContentRegistry + namedWriteableRegistry ); assertThat(remoteObjectForDownload.getBlobPathTokens(), is(new String[] { "user", "local", "opensearch", "customMetadata" })); } @@ -179,7 +165,7 @@ public void testBlobPathParameters() { METADATA_VERSION, clusterUUID, compressor, - namedXContentRegistry + namedWriteableRegistry ); BlobPathParameters params = remoteObjectForUpload.getBlobPathParameters(); assertThat(params.getPathTokens(), is(List.of(RemoteClusterStateUtils.GLOBAL_METADATA_PATH_TOKEN))); @@ -195,7 +181,7 @@ public void testGenerateBlobFileName() { METADATA_VERSION, clusterUUID, compressor, - namedXContentRegistry + namedWriteableRegistry ); String blobFileName = remoteObjectForUpload.generateBlobFileName(); String[] nameTokens = blobFileName.split(RemoteClusterStateUtils.DELIMITER); @@ -215,7 +201,7 @@ public void testGetUploadedMetadata() throws IOException { METADATA_VERSION, clusterUUID, compressor, - namedXContentRegistry + namedWriteableRegistry ); assertThrows(AssertionError.class, remoteObjectForUpload::getUploadedMetadata); @@ -236,7 +222,7 @@ public void testSerDe() throws IOException { METADATA_VERSION, clusterUUID, compressor, - namedXContentRegistry + namedWriteableRegistry ); try (InputStream inputStream = remoteObjectForUpload.serialize()) { remoteObjectForUpload.setFullBlobName(BlobPath.cleanPath()); diff --git a/server/src/test/java/org/opensearch/gateway/remote/model/RemoteDiscoveryNodesTests.java b/server/src/test/java/org/opensearch/gateway/remote/model/RemoteDiscoveryNodesTests.java index 0c46960938798..f1bced2bdf855 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/model/RemoteDiscoveryNodesTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/model/RemoteDiscoveryNodesTests.java @@ -145,7 +145,6 @@ public void testExceptionDuringSerialization() throws IOException { RemoteDiscoveryNodes remoteObjectForUpload = new RemoteDiscoveryNodes(nodes, METADATA_VERSION, clusterUUID, compressor); doThrow(new IOException("mock-exception")).when(nodes).writeTo(any()); IOException iea = assertThrows(IOException.class, remoteObjectForUpload::serialize); - assertEquals("Failed to serialize remote discovery nodes", iea.getMessage()); } public void testExceptionDuringDeserialize() throws IOException { @@ -155,10 +154,9 @@ public void testExceptionDuringDeserialize() throws IOException { String uploadedFile = "user/local/opensearch/discovery-nodes"; RemoteDiscoveryNodes remoteObjectForDownload = new RemoteDiscoveryNodes(uploadedFile, clusterUUID, compressor); IOException ioe = assertThrows(IOException.class, () -> remoteObjectForDownload.deserialize(in)); - assertEquals("Failed to deserialize remote discovery nodes", ioe.getMessage()); } - private DiscoveryNodes getDiscoveryNodes() { + public static DiscoveryNodes getDiscoveryNodes() { return DiscoveryNodes.builder() .add( new DiscoveryNode( diff --git a/server/src/test/java/org/opensearch/gateway/remote/model/RemoteGlobalMetadataTests.java b/server/src/test/java/org/opensearch/gateway/remote/model/RemoteGlobalMetadataTests.java index 24752302dc3df..02ddc8ba93071 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/model/RemoteGlobalMetadataTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/model/RemoteGlobalMetadataTests.java @@ -205,6 +205,5 @@ private Metadata getGlobalMetadata() { .build() ) .build(); - } } diff --git 
a/server/src/test/java/org/opensearch/gateway/remote/model/RemoteIndexMetadataTests.java b/server/src/test/java/org/opensearch/gateway/remote/model/RemoteIndexMetadataTests.java index 8bf053e45e0b3..7f9c3fdbae91b 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/model/RemoteIndexMetadataTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/model/RemoteIndexMetadataTests.java @@ -40,8 +40,8 @@ import java.util.stream.Stream; import static java.util.stream.Collectors.toList; +import static org.opensearch.gateway.remote.model.RemoteIndexMetadata.INDEX; import static org.opensearch.gateway.remote.model.RemoteIndexMetadata.INDEX_METADATA_CURRENT_CODEC_VERSION; -import static org.opensearch.gateway.remote.model.RemoteIndexMetadata.INDEX_PATH_TOKEN; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThanOrEqualTo; @@ -137,7 +137,7 @@ public void testBlobPathParameters() { IndexMetadata indexMetadata = getIndexMetadata(); RemoteIndexMetadata remoteObjectForUpload = new RemoteIndexMetadata(indexMetadata, clusterUUID, compressor, namedXContentRegistry); BlobPathParameters params = remoteObjectForUpload.getBlobPathParameters(); - assertThat(params.getPathTokens(), is(List.of(INDEX_PATH_TOKEN, indexMetadata.getIndexUUID()))); + assertThat(params.getPathTokens(), is(List.of(INDEX, indexMetadata.getIndexUUID()))); assertThat(params.getFilePrefix(), is("metadata")); } @@ -156,12 +156,9 @@ public void testGetUploadedMetadata() throws IOException { IndexMetadata indexMetadata = getIndexMetadata(); RemoteIndexMetadata remoteObjectForUpload = new RemoteIndexMetadata(indexMetadata, clusterUUID, compressor, namedXContentRegistry); assertThrows(AssertionError.class, remoteObjectForUpload::getUploadedMetadata); - - try (InputStream inputStream = remoteObjectForUpload.serialize()) { - remoteObjectForUpload.setFullBlobName(new BlobPath().add(TEST_BLOB_PATH)); - UploadedMetadata uploadedMetadata = remoteObjectForUpload.getUploadedMetadata(); - assertThat(uploadedMetadata.getUploadedFilename(), is(remoteObjectForUpload.getBlobFileName())); - } + remoteObjectForUpload.setFullBlobName(new BlobPath().add(TEST_BLOB_PATH)); + UploadedMetadata uploadedMetadata = remoteObjectForUpload.getUploadedMetadata(); + assertEquals(uploadedMetadata.getUploadedFilename(), remoteObjectForUpload.getFullBlobName()); } public void testSerDe() throws IOException { diff --git a/server/src/test/java/org/opensearch/index/mapper/DateFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/DateFieldMapperTests.java index 2aa310ae959d9..98bcaa3a1a46b 100644 --- a/server/src/test/java/org/opensearch/index/mapper/DateFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/DateFieldMapperTests.java @@ -208,7 +208,7 @@ public void testChangeLocale() throws IOException { fieldMapping(b -> b.field("type", "date").field("format", "E, d MMM yyyy HH:mm:ss Z").field("locale", "de")) ); - mapper.parse(source(b -> b.field("field", "Mi, 06 Dez 2000 02:55:00 -0800"))); + mapper.parse(source(b -> b.field("field", "Mi., 06 Dez. 
2000 02:55:00 -0800"))); } public void testNullValue() throws IOException { diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteIndexPathUploaderTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteIndexPathUploaderTests.java index e539b382a5f3b..e0a75f7296705 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteIndexPathUploaderTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteIndexPathUploaderTests.java @@ -22,7 +22,7 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.core.action.ActionListener; import org.opensearch.gateway.remote.RemoteClusterStateService; -import org.opensearch.gateway.remote.RemoteClusterStateService.RemoteStateTransferException; +import org.opensearch.gateway.remote.RemoteStateTransferException; import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; import org.opensearch.index.remote.RemoteStoreEnums.PathType; import org.opensearch.node.Node; @@ -46,6 +46,7 @@ import org.mockito.Mockito; +import static org.opensearch.gateway.remote.RemoteGlobalMetadataManager.GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING; import static org.opensearch.index.remote.RemoteStoreEnums.PathType.FIXED; import static org.opensearch.index.remote.RemoteStoreEnums.PathType.HASHED_INFIX; import static org.opensearch.index.remote.RemoteStoreEnums.PathType.HASHED_PREFIX; @@ -276,7 +277,7 @@ public void testInterceptWithLatchAwaitTimeout() throws IOException { Settings settings = Settings.builder() .put(this.settings) - .put(RemoteClusterStateService.INDEX_METADATA_UPLOAD_TIMEOUT_SETTING.getKey(), TimeValue.ZERO) + .put(GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING.getKey(), TimeValue.ZERO) .build(); clusterSettings.applySettings(settings); SetOnce<Exception> exceptionSetOnce = new SetOnce<>(); @@ -306,7 +307,7 @@ public void testInterceptWithInterruptedExceptionDuringLatchAwait() throws Excep remoteIndexPathUploader.start(); Settings settings = Settings.builder() .put(this.settings) - .put(RemoteClusterStateService.INDEX_METADATA_UPLOAD_TIMEOUT_SETTING.getKey(), TimeValue.timeValueSeconds(1)) + .put(GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING.getKey(), TimeValue.timeValueSeconds(1)) .build(); clusterSettings.applySettings(settings); SetOnce<Exception> exceptionSetOnce = new SetOnce<>(); diff --git a/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java b/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java index dcddd9f3d1318..205712d388cd1 100644 --- a/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java +++ b/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java @@ -95,7 +95,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; -import java.util.HashMap; +import java.util.ConcurrentModificationException; import java.util.List; import java.util.Map; import java.util.Optional; @@ -105,6 +105,9 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; +import java.util.concurrent.Phaser; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import static java.util.Collections.emptyMap; @@ -488,7 +491,7 @@ public void testStaleCount_OnRemovalNotificationOfStaleKey_DecrementsStaleCount( indexShard.hashCode() ); // test the mapping - ConcurrentMap<ShardId, HashMap<String, Integer>> cleanupKeyToCountMap = cache.cacheCleanupManager.getCleanupKeyToCountMap(); + ConcurrentMap<ShardId, ConcurrentMap<String, Integer>> cleanupKeyToCountMap = cache.cacheCleanupManager.getCleanupKeyToCountMap(); // shard id should exist assertTrue(cleanupKeyToCountMap.containsKey(shardId)); // reader CacheKeyId should NOT exist @@ -551,7 +554,7 @@ public void testStaleCount_OnRemovalNotificationOfNonStaleKey_DoesNotDecrementsS ); // test the mapping - ConcurrentMap<ShardId, HashMap<String, Integer>> cleanupKeyToCountMap = cache.cacheCleanupManager.getCleanupKeyToCountMap(); + ConcurrentMap<ShardId, ConcurrentMap<String, Integer>> cleanupKeyToCountMap = cache.cacheCleanupManager.getCleanupKeyToCountMap(); // shard id should exist assertTrue(cleanupKeyToCountMap.containsKey(shardId)); // reader CacheKeyId should NOT exist @@ -719,7 +722,7 @@ public void testCleanupKeyToCountMapAreSetAppropriately() throws Exception { cache.getOrCompute(getEntity(indexShard), getLoader(reader), reader, getTermBytes()); assertEquals(1, cache.count()); // test the mappings - ConcurrentMap<ShardId, HashMap<String, Integer>> cleanupKeyToCountMap = cache.cacheCleanupManager.getCleanupKeyToCountMap(); + ConcurrentMap<ShardId, ConcurrentMap<String, Integer>> cleanupKeyToCountMap = cache.cacheCleanupManager.getCleanupKeyToCountMap(); assertEquals(1, (int) cleanupKeyToCountMap.get(shardId).get(getReaderCacheKeyId(reader))); cache.getOrCompute(getEntity(indexShard), getLoader(secondReader), secondReader, getTermBytes()); @@ -792,27 +795,71 @@ public void testCleanupKeyToCountMapAreSetAppropriately() throws Exception { IOUtils.close(secondReader); } - private DirectoryReader getReader(IndexWriter writer, ShardId shardId) throws IOException { - return OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), shardId); + // test adding to cleanupKeyToCountMap with multiple threads + public void testAddToCleanupKeyToCountMap() throws Exception { + threadPool = getThreadPool(); + Settings settings = Settings.builder().put(INDICES_REQUEST_CACHE_STALENESS_THRESHOLD_SETTING.getKey(), "51%").build(); + cache = getIndicesRequestCache(settings); + + int numberOfThreads = 10; + int numberOfIterations = 1000; + Phaser phaser = new Phaser(numberOfThreads + 1); // +1 for the main thread + AtomicBoolean exceptionDetected = new AtomicBoolean(false); + + ExecutorService executorService = Executors.newFixedThreadPool(numberOfThreads); + + for (int i = 0; i < numberOfThreads; i++) { + executorService.submit(() -> { + phaser.arriveAndAwaitAdvance(); // Ensure all threads start at the same time + try { + for (int j = 0; j < numberOfIterations; j++) { + cache.cacheCleanupManager.addToCleanupKeyToCountMap(indexShard.shardId(), UUID.randomUUID().toString()); + } + } catch (ConcurrentModificationException e) { + logger.error("ConcurrentModificationException detected in thread : " + e.getMessage()); + exceptionDetected.set(true); // Set flag if exception is detected + } + }); + } + phaser.arriveAndAwaitAdvance(); // Start all threads + + // Main thread iterates over the map + executorService.submit(() -> { + try { + for (int j = 0; j < numberOfIterations; j++) { + cache.cacheCleanupManager.getCleanupKeyToCountMap().forEach((k, v) -> { + v.forEach((k1, v1) -> { + // Accessing the map to create contention + v.get(k1); + }); + }); + } + } catch (ConcurrentModificationException e) { + logger.error("ConcurrentModificationException detected in main thread : " + e.getMessage()); + exceptionDetected.set(true); // Set flag if exception is detected + } + }); + + executorService.shutdown(); + executorService.awaitTermination(60, TimeUnit.SECONDS); + assertFalse(exceptionDetected.get()); }
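The test above uses a Phaser as a start gate so that map writes and iteration genuinely overlap. The same pattern in isolation, as a standalone sketch (class and variable names are illustrative, not from this PR):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Phaser;
import java.util.concurrent.TimeUnit;

// Sketch: every worker blocks on arriveAndAwaitAdvance() until the coordinating
// thread arrives too, so all parties start mutating at the same instant. A plain
// HashMap iterated under this load can throw ConcurrentModificationException;
// ConcurrentHashMap's weakly consistent iterators never do.
public class StartGateDemo {
    public static void main(String[] args) throws InterruptedException {
        int workers = 10;
        Map<String, Integer> map = new ConcurrentHashMap<>();
        Phaser gate = new Phaser(workers + 1); // +1 for this coordinating thread
        ExecutorService pool = Executors.newFixedThreadPool(workers);
        for (int i = 0; i < workers; i++) {
            final int id = i;
            pool.submit(() -> {
                gate.arriveAndAwaitAdvance(); // wait until every party is ready
                for (int j = 0; j < 1000; j++) {
                    map.put(id + ":" + j, j);
                }
            });
        }
        gate.arriveAndAwaitAdvance(); // releases all workers at once
        map.forEach((k, v) -> {});    // safe to iterate while workers keep writing
        pool.shutdown();
        pool.awaitTermination(10, TimeUnit.SECONDS);
    }
}

private IndicesRequestCache getIndicesRequestCache(Settings settings) { IndicesService indicesService = getInstanceFromNode(IndicesService.class); - return new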
IndicesRequestCache(settings, (shardId -> { - IndexService indexService = null; - try { - indexService = indicesService.indexServiceSafe(shardId.getIndex()); - } catch (IndexNotFoundException ex) { - return Optional.empty(); - } - return Optional.of(new IndicesService.IndexShardCacheEntity(indexService.getShard(shardId.id()))); - }), + return new IndicesRequestCache( + settings, + indicesService.indicesRequestCache.cacheEntityLookup, new CacheModule(new ArrayList<>(), Settings.EMPTY).getCacheService(), threadPool, ClusterServiceUtils.createClusterService(threadPool) ); } + private DirectoryReader getReader(IndexWriter writer, ShardId shardId) throws IOException { + return OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), shardId); + } + private Loader getLoader(DirectoryReader reader) { return new Loader(reader, 0); } @@ -1419,6 +1466,55 @@ public void testDeleteAndCreateIndexShardOnSameNodeAndVerifyStats() throws Excep IOUtils.close(reader, writer, dir, cache); } + public void testIndexShardClosedAndVerifyCacheCleanUpWorksSuccessfully() throws Exception { + threadPool = getThreadPool(); + String indexName = "test1"; + // Create a shard + IndexService indexService = createIndex( + indexName, + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build() + ); + IndexShard indexShard = indexService.getShard(0); + Directory dir = newDirectory(); + IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); + writer.addDocument(newDoc(0, "foo")); + writer.addDocument(newDoc(1, "hack")); + DirectoryReader reader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), indexShard.shardId()); + Loader loader = new Loader(reader, 0); + + // Set clean interval to a high value as we will do it manually here. + IndicesRequestCache cache = getIndicesRequestCache( + Settings.builder() + .put(IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_INTERVAL_SETTING_KEY, TimeValue.timeValueMillis(100000)) + .build() + ); + IndicesService.IndexShardCacheEntity cacheEntity = new IndicesService.IndexShardCacheEntity(indexShard); + TermQueryBuilder termQuery = new TermQueryBuilder("id", "bar"); + + // Cache some values for indexShard + BytesReference value = cache.getOrCompute(cacheEntity, loader, reader, getTermBytes()); + + // Verify response and stats. + assertEquals("foo", value.streamInput().readString()); + RequestCacheStats stats = indexShard.requestCache().stats(); + assertEquals("foo", value.streamInput().readString()); + assertEquals(1, cache.count()); + assertEquals(1, stats.getMissCount()); + assertTrue(stats.getMemorySizeInBytes() > 0); + + // Remove the shard making its cache entries stale + IOUtils.close(reader, writer, dir); + indexService.removeShard(0, "force"); + + assertBusy(() -> { assertEquals(IndexShardState.CLOSED, indexShard.state()); }, 1, TimeUnit.SECONDS); + + // Trigger clean up of cache. Should not throw any exception. + cache.cacheCleanupManager.cleanCache(); + // Verify all cleared up. 
+ assertEquals(0, cache.count()); + IOUtils.close(cache); + } + public static String generateString(int length) { String characters = "abcdefghijklmnopqrstuvwxyz"; StringBuilder sb = new StringBuilder(length); diff --git a/server/src/test/java/org/opensearch/ingest/IngestServiceTests.java b/server/src/test/java/org/opensearch/ingest/IngestServiceTests.java index 6d216370bae9a..a32cd2c3cad3f 100644 --- a/server/src/test/java/org/opensearch/ingest/IngestServiceTests.java +++ b/server/src/test/java/org/opensearch/ingest/IngestServiceTests.java @@ -97,6 +97,7 @@ import java.util.function.LongSupplier; import java.util.stream.Collectors; +import org.mockito.ArgumentCaptor; import org.mockito.ArgumentMatcher; import org.mockito.invocation.InvocationOnMock; @@ -1894,7 +1895,7 @@ public void testExecuteBulkRequestInBatchWithException() { verify(mockCompoundProcessor, never()).execute(any(), any()); } - public void testExecuteBulkRequestInBatchWithExceptionInCallback() { + public void testExecuteBulkRequestInBatchWithExceptionAndDropInCallback() { CompoundProcessor mockCompoundProcessor = mockCompoundProcessor(); IngestService ingestService = createWithProcessors( Collections.singletonMap("mock", (factories, tag, description, config) -> mockCompoundProcessor) @@ -1906,11 +1907,14 @@ public void testExecuteBulkRequestInBatchWithExceptionInCallback() { bulkRequest.add(indexRequest1); IndexRequest indexRequest2 = new IndexRequest("_index").id("_id2").source(emptyMap()).setPipeline("_id").setFinalPipeline("_none"); bulkRequest.add(indexRequest2); - bulkRequest.batchSize(2); + IndexRequest indexRequest3 = new IndexRequest("_index").id("_id3").source(emptyMap()).setPipeline("_id").setFinalPipeline("_none"); + bulkRequest.add(indexRequest3); + bulkRequest.batchSize(3); List<IngestDocumentWrapper> results = Arrays.asList( new IngestDocumentWrapper(0, IngestService.toIngestDocument(indexRequest1), null), - new IngestDocumentWrapper(1, null, new RuntimeException()) + new IngestDocumentWrapper(1, null, new RuntimeException()), + new IngestDocumentWrapper(2, null, null) ); doAnswer(args -> { @SuppressWarnings("unchecked") @@ -1923,16 +1927,22 @@ public void testExecuteBulkRequestInBatchWithExceptionInCallback() { final BiConsumer<Integer, Exception> failureHandler = mock(BiConsumer.class); @SuppressWarnings("unchecked") final BiConsumer<Thread, Exception> completionHandler = mock(BiConsumer.class); + final IntConsumer dropHandler = mock(IntConsumer.class); ingestService.executeBulkRequest( - 2, + 3, bulkRequest.requests(), failureHandler, completionHandler, - indexReq -> {}, + dropHandler, Names.WRITE, bulkRequest ); - verify(failureHandler, times(1)).accept(any(), any()); + ArgumentCaptor<Integer> failureSlotCaptor = ArgumentCaptor.forClass(Integer.class); + verify(failureHandler, times(1)).accept(failureSlotCaptor.capture(), any()); + assertEquals(1, failureSlotCaptor.getValue().intValue()); + ArgumentCaptor<Integer> dropSlotCaptor = ArgumentCaptor.forClass(Integer.class); + verify(dropHandler, times(1)).accept(dropSlotCaptor.capture()); + assertEquals(2, dropSlotCaptor.getValue().intValue()); verify(completionHandler, times(1)).accept(Thread.currentThread(), null); verify(mockCompoundProcessor, times(1)).batchExecute(any(), any()); verify(mockCompoundProcessor, never()).execute(any(), any());
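The slot assertions above use Mockito's capture-then-assert idiom: grab the int handed to the mocked callback, then check which slot it was. Reduced to a standalone sketch ('handler' and the class name are stand-ins, not names from this PR):

import java.util.function.BiConsumer;

import org.mockito.ArgumentCaptor;

import static org.junit.Assert.assertEquals;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;

public class SlotCaptureDemo {
    @SuppressWarnings("unchecked")
    public void demo() {
        BiConsumer<Integer, Exception> handler = mock(BiConsumer.class);
        handler.accept(1, new RuntimeException("boom")); // stands in for the code under test
        ArgumentCaptor<Integer> slot = ArgumentCaptor.forClass(Integer.class);
        verify(handler, times(1)).accept(slot.capture(), any());
        assertEquals(1, slot.getValue().intValue()); // the failure landed in slot 1
    }
}

diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/CardinalityAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/CardinalityAggregatorTests.java index cdd17e2fa7dd6..b5dd27e37c332 100644 ---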
a/server/src/test/java/org/opensearch/search/aggregations/metrics/CardinalityAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/CardinalityAggregatorTests.java @@ -33,30 +33,56 @@ package org.opensearch.search.aggregations.metrics; import org.apache.lucene.document.BinaryDocValuesField; +import org.apache.lucene.document.Field; import org.apache.lucene.document.IntPoint; +import org.apache.lucene.document.KeywordField; import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.document.SortedSetDocValuesField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.NoMergePolicy; +import org.apache.lucene.index.Term; import org.apache.lucene.search.DocValuesFieldExistsQuery; +import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.store.Directory; import org.apache.lucene.tests.index.RandomIndexWriter; +import org.apache.lucene.tests.util.TestUtil; +import org.apache.lucene.util.BytesRef; import org.opensearch.common.CheckedConsumer; import org.opensearch.common.geo.GeoPoint; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.index.mapper.KeywordFieldMapper; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.NumberFieldMapper; import org.opensearch.index.mapper.RangeFieldMapper; import org.opensearch.index.mapper.RangeType; import org.opensearch.search.aggregations.AggregationBuilder; import org.opensearch.search.aggregations.AggregatorTestCase; +import org.opensearch.search.aggregations.InternalAggregation; +import org.opensearch.search.aggregations.MultiBucketConsumerService; +import org.opensearch.search.aggregations.pipeline.PipelineAggregator; import org.opensearch.search.aggregations.support.AggregationInspectionHelper; import java.io.IOException; import java.util.Arrays; +import java.util.Collections; import java.util.HashSet; +import java.util.List; import java.util.Set; +import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Consumer; +import static java.util.Arrays.asList; import static java.util.Collections.singleton; +import static org.opensearch.test.InternalAggregationTestCase.DEFAULT_MAX_BUCKETS; +import static org.mockito.Mockito.when; public class CardinalityAggregatorTests extends AggregatorTestCase { @@ -199,4 +225,276 @@ private void testAggregation( ) throws IOException { testCase(aggregationBuilder, query, buildIndex, verify, fieldType); } + + public void testDynamicPruningDisabledWhenExceedingThreshold() throws IOException { + final String fieldName = "testField"; + final String filterFieldName = "filterField"; + + MappedFieldType fieldType = new KeywordFieldMapper.KeywordFieldType(fieldName); + final CardinalityAggregationBuilder aggregationBuilder = new CardinalityAggregationBuilder("_name").field(fieldName); + + int randomCardinality = randomIntBetween(20, 100); + AtomicInteger counter = new AtomicInteger(); + + testDynamicPruning(aggregationBuilder, new TermQuery(new Term(filterFieldName, "foo")), iw -> { + for (int i = 0; i < randomCardinality; i++) { + String 
filterValue = "foo"; + if (randomBoolean()) { + filterValue = "bar"; + counter.getAndIncrement(); + } + iw.addDocument( + asList( + new KeywordField(filterFieldName, filterValue, Field.Store.NO), + new KeywordField(fieldName, String.valueOf(i), Field.Store.NO), + new SortedSetDocValuesField(fieldName, new BytesRef(String.valueOf(i))) + ) + ); + } + }, + card -> { assertEquals(randomCardinality - counter.get(), card.getValue(), 0); }, + fieldType, + 10, + (collectCount) -> assertEquals(randomCardinality - counter.get(), (int) collectCount) + ); + } + + public void testDynamicPruningFixedValues() throws IOException { + final String fieldName = "testField"; + final String filterFieldName = "filterField"; + + MappedFieldType fieldType = new KeywordFieldMapper.KeywordFieldType(fieldName); + final CardinalityAggregationBuilder aggregationBuilder = new CardinalityAggregationBuilder("_name").field(fieldName); + testDynamicPruning(aggregationBuilder, new TermQuery(new Term(filterFieldName, "foo")), iw -> { + iw.addDocument( + asList( + new KeywordField(fieldName, "1", Field.Store.NO), + new KeywordField(fieldName, "2", Field.Store.NO), + new KeywordField(filterFieldName, "foo", Field.Store.NO), + new SortedSetDocValuesField(fieldName, new BytesRef("1")), + new SortedSetDocValuesField(fieldName, new BytesRef("2")) + ) + ); + iw.addDocument( + asList( + new KeywordField(fieldName, "2", Field.Store.NO), + new KeywordField(filterFieldName, "foo", Field.Store.NO), + new SortedSetDocValuesField(fieldName, new BytesRef("2")) + ) + ); + iw.addDocument( + asList( + new KeywordField(fieldName, "1", Field.Store.NO), + new KeywordField(filterFieldName, "foo", Field.Store.NO), + new SortedSetDocValuesField(fieldName, new BytesRef("1")) + ) + ); + iw.addDocument( + asList( + new KeywordField(fieldName, "2", Field.Store.NO), + new KeywordField(filterFieldName, "foo", Field.Store.NO), + new SortedSetDocValuesField(fieldName, new BytesRef("2")) + ) + ); + iw.addDocument( + asList( + new KeywordField(fieldName, "3", Field.Store.NO), + new KeywordField(filterFieldName, "foo", Field.Store.NO), + new SortedSetDocValuesField(fieldName, new BytesRef("3")) + ) + ); + iw.addDocument( + asList( + new KeywordField(fieldName, "4", Field.Store.NO), + new KeywordField(filterFieldName, "bar", Field.Store.NO), + new SortedSetDocValuesField(fieldName, new BytesRef("4")) + ) + ); + iw.addDocument( + asList( + new KeywordField(fieldName, "5", Field.Store.NO), + new KeywordField(filterFieldName, "bar", Field.Store.NO), + new SortedSetDocValuesField(fieldName, new BytesRef("5")) + ) + ); + }, card -> { + assertEquals(3.0, card.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(card)); + }, fieldType, 100, (collectCount) -> assertEquals(0, (int) collectCount)); + } + + public void testDynamicPruningRandomValues() throws IOException { + final String fieldName = "testField"; + final String filterFieldName = "filterField"; + + MappedFieldType fieldType = new KeywordFieldMapper.KeywordFieldType(fieldName); + final CardinalityAggregationBuilder aggregationBuilder = new CardinalityAggregationBuilder("_name").field(fieldName); + + int randomCardinality = randomIntBetween(1, 100); + AtomicInteger counter = new AtomicInteger(); + + testDynamicPruning(aggregationBuilder, new TermQuery(new Term(filterFieldName, "foo")), iw -> { + for (int i = 0; i < randomCardinality; i++) { + String filterValue = "foo"; + if (randomBoolean()) { + filterValue = "bar"; + counter.getAndIncrement(); + } + iw.addDocument( + asList( + new 
KeywordField(filterFieldName, filterValue, Field.Store.NO), + new KeywordField(fieldName, String.valueOf(i), Field.Store.NO), + new SortedSetDocValuesField(fieldName, new BytesRef(String.valueOf(i))) + ) + ); + } + }, card -> { + logger.info("expected {}, cardinality: {}", randomCardinality - counter.get(), card.getValue()); + assertEquals(randomCardinality - counter.get(), card.getValue(), 0); + }, fieldType, 100, (collectCount) -> assertEquals(0, (int) collectCount)); + } + + public void testDynamicPruningRandomDelete() throws IOException { + final String fieldName = "testField"; + + MappedFieldType fieldType = new KeywordFieldMapper.KeywordFieldType(fieldName); + final CardinalityAggregationBuilder aggregationBuilder = new CardinalityAggregationBuilder("_name").field(fieldName); + + int randomCardinality = randomIntBetween(1, 100); + AtomicInteger counter = new AtomicInteger(); + + testDynamicPruning(aggregationBuilder, new MatchAllDocsQuery(), iw -> { + for (int i = 0; i < randomCardinality; i++) { + iw.addDocument( + asList( + new KeywordField(fieldName, String.valueOf(i), Field.Store.NO), + new SortedSetDocValuesField(fieldName, new BytesRef(String.valueOf(i))) + ) + ); + if (randomBoolean()) { + iw.deleteDocuments(new Term(fieldName, String.valueOf(i))); + counter.getAndIncrement(); + } + } + }, + card -> { assertEquals(randomCardinality - counter.get(), card.getValue(), 0); }, + fieldType, + 100, + (collectCount) -> assertEquals(0, (int) collectCount) + ); + } + + public void testDynamicPruningFieldMissingInSegment() throws IOException { + final String fieldName = "testField"; + final String fieldName2 = "testField2"; + + MappedFieldType fieldType = new KeywordFieldMapper.KeywordFieldType(fieldName); + final CardinalityAggregationBuilder aggregationBuilder = new CardinalityAggregationBuilder("_name").field(fieldName); + + int randomNumSegments = randomIntBetween(1, 50); + logger.info("Indexing [{}] segments", randomNumSegments); + + testDynamicPruning(aggregationBuilder, new MatchAllDocsQuery(), iw -> { + for (int i = 0; i < randomNumSegments; i++) { + iw.addDocument( + asList( + new KeywordField(fieldName, String.valueOf(i), Field.Store.NO), + new SortedSetDocValuesField(fieldName, new BytesRef(String.valueOf(i))) + ) + ); + iw.commit(); + } + iw.addDocument(List.of(new KeywordField(fieldName2, "100", Field.Store.NO))); + iw.addDocument(List.of(new KeywordField(fieldName2, "101", Field.Store.NO))); + iw.addDocument(List.of(new KeywordField(fieldName2, "102", Field.Store.NO))); + iw.commit(); + }, + card -> { assertEquals(randomNumSegments, card.getValue(), 0); }, + fieldType, + 100, + (collectCount) -> assertEquals(3, (int) collectCount) + ); + } + + private void testDynamicPruning( + AggregationBuilder aggregationBuilder, + Query query, + CheckedConsumer<IndexWriter, IOException> buildIndex, + Consumer<InternalCardinality> verify, + MappedFieldType fieldType, + int pruningThreshold, + Consumer<Integer> verifyCollectCount + ) throws IOException { + try (Directory directory = newDirectory()) { + try ( + IndexWriter indexWriter = new IndexWriter( + directory, + new IndexWriterConfig().setCodec(TestUtil.getDefaultCodec()).setMergePolicy(NoMergePolicy.INSTANCE) + ) + ) { + // disable merge so segment number is same as commit times + buildIndex.accept(indexWriter); + } + + try (IndexReader indexReader = DirectoryReader.open(directory)) { + IndexSearcher indexSearcher = newSearcher(indexReader, true, true); + + CountingAggregator aggregator = createCountingAggregator( + query, + aggregationBuilder, + indexSearcher, + fieldType,
pruningThreshold + ); + aggregator.preCollection(); + indexSearcher.search(query, aggregator); + aggregator.postCollection(); + + MultiBucketConsumerService.MultiBucketConsumer reduceBucketConsumer = new MultiBucketConsumerService.MultiBucketConsumer( + Integer.MAX_VALUE, + new NoneCircuitBreakerService().getBreaker(CircuitBreaker.REQUEST) + ); + InternalAggregation.ReduceContext context = InternalAggregation.ReduceContext.forFinalReduction( + aggregator.context().bigArrays(), + getMockScriptService(), + reduceBucketConsumer, + PipelineAggregator.PipelineTree.EMPTY + ); + InternalCardinality topLevel = (InternalCardinality) aggregator.buildTopLevel(); + InternalCardinality card = (InternalCardinality) topLevel.reduce(Collections.singletonList(topLevel), context); + doAssertReducedMultiBucketConsumer(card, reduceBucketConsumer); + + verify.accept(card); + + logger.info("aggregator collect count {}", aggregator.getCollectCount().get()); + verifyCollectCount.accept(aggregator.getCollectCount().get()); + } + } + } + + protected CountingAggregator createCountingAggregator( + Query query, + AggregationBuilder builder, + IndexSearcher searcher, + MappedFieldType fieldType, + int pruningThreshold + ) throws IOException { + return new CountingAggregator( + new AtomicInteger(), + createAggregatorWithCustomizableSearchContext( + query, + builder, + searcher, + createIndexSettings(), + new MultiBucketConsumerService.MultiBucketConsumer( + DEFAULT_MAX_BUCKETS, + new NoneCircuitBreakerService().getBreaker(CircuitBreaker.REQUEST) + ), + (searchContext) -> { + when(searchContext.cardinalityAggregationPruningThreshold()).thenReturn(pruningThreshold); + }, + fieldType + ) + ); + } } diff --git a/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java b/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java index 6af04e15acef0..1d545cea67207 100644 --- a/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java +++ b/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java @@ -35,6 +35,7 @@ import org.apache.lucene.search.LeafCollector; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.PhraseQuery; import org.apache.lucene.search.Pruning; import org.apache.lucene.search.Query; import org.apache.lucene.search.Sort; @@ -48,12 +49,18 @@ import org.opensearch.action.search.SearchShardTask; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.index.Index; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.mapper.DocumentMapper; +import org.opensearch.index.mapper.MatchOnlyTextFieldMapper; import org.opensearch.index.mapper.NumberFieldMapper.NumberFieldType; import org.opensearch.index.mapper.NumberFieldMapper.NumberType; +import org.opensearch.index.mapper.SourceFieldMapper; +import org.opensearch.index.mapper.TextSearchInfo; import org.opensearch.index.query.ParsedQuery; import org.opensearch.index.query.QueryShardContext; +import org.opensearch.index.query.SourceFieldMatchQuery; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; import org.opensearch.lucene.queries.MinDocQuery; @@ -62,6 +69,9 @@ import org.opensearch.search.internal.ContextIndexSearcher; import org.opensearch.search.internal.ScrollContext; import 
org.opensearch.search.internal.SearchContext; +import org.opensearch.search.lookup.LeafSearchLookup; +import org.opensearch.search.lookup.SearchLookup; +import org.opensearch.search.lookup.SourceLookup; import org.opensearch.search.profile.ProfileResult; import org.opensearch.search.profile.ProfileShardResult; import org.opensearch.search.profile.SearchProfileShardResults; @@ -80,6 +90,7 @@ import java.util.Collection; import java.util.Collections; import java.util.List; +import java.util.Set; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; @@ -94,6 +105,7 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -1514,6 +1526,90 @@ public void testCollapseQuerySearchResults() throws Exception { dir.close(); } + public void testSourceFieldMatchQueryWithProfile() throws Exception { + Directory dir = newDirectory(); + IndexWriterConfig iwc = newIndexWriterConfig(); + RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); + w.close(); + IndexReader reader = DirectoryReader.open(dir); + QueryShardContext queryShardContext = mock(QueryShardContext.class); + DocumentMapper mockDocumentMapper = mock(DocumentMapper.class); + SourceFieldMapper mockSourceMapper = mock(SourceFieldMapper.class); + SearchLookup searchLookup = mock(SearchLookup.class); + LeafSearchLookup leafSearchLookup = mock(LeafSearchLookup.class); + + when(queryShardContext.sourcePath("foo")).thenReturn(Set.of("bar")); + when(queryShardContext.index()).thenReturn(new Index("test_index", "uuid")); + when(searchLookup.getLeafSearchLookup(any())).thenReturn(leafSearchLookup); + when(leafSearchLookup.source()).thenReturn(new SourceLookup()); + when(mockSourceMapper.enabled()).thenReturn(true); + when(mockDocumentMapper.sourceMapper()).thenReturn(mockSourceMapper); + when(queryShardContext.documentMapper(any())).thenReturn(mockDocumentMapper); + when(queryShardContext.lookup()).thenReturn(searchLookup); + + TestSearchContext context = new TestSearchContext(queryShardContext, indexShard, newContextSearcher(reader, executor)); + context.parsedQuery( + new ParsedQuery( + new SourceFieldMatchQuery( + new TermQuery(new Term("foo", "bar")), + new PhraseQuery("foo", "bar", "baz"), + new MatchOnlyTextFieldMapper.MatchOnlyTextFieldType( + "user", + true, + true, + TextSearchInfo.WHITESPACE_MATCH_ONLY, + Collections.emptyMap() + ), + queryShardContext + ) + ) + ); + + context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); + context.setSize(1); + context.trackTotalHitsUpTo(5); + QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher); + assertProfileData(context, "SourceFieldMatchQuery", query -> { + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L)); + if (executor != null) { + long maxScore = query.getTimeBreakdown().get("max_score"); + long minScore = query.getTimeBreakdown().get("min_score"); + long avgScore = query.getTimeBreakdown().get("avg_score"); + long maxScoreCount = query.getTimeBreakdown().get("max_score_count"); + long minScoreCount = query.getTimeBreakdown().get("min_score_count"); + long avgScoreCount = 
query.getTimeBreakdown().get("avg_score_count"); + assertThat(maxScore, equalTo(0L)); + assertThat(minScore, equalTo(0L)); + assertThat(avgScore, equalTo(0L)); + assertThat(maxScore, equalTo(avgScore)); + assertThat(avgScore, equalTo(minScore)); + assertThat(maxScoreCount, equalTo(0L)); + assertThat(minScoreCount, equalTo(0L)); + assertThat(avgScoreCount, equalTo(0L)); + assertThat(maxScoreCount, equalTo(avgScoreCount)); + assertThat(avgScoreCount, equalTo(minScoreCount)); + } + assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + assertThat(query.getProfiledChildren(), empty()); + }, collector -> { + assertThat(collector.getReason(), equalTo("search_top_hits")); + assertThat(collector.getTime(), greaterThan(0L)); + if (collector.getName().contains("CollectorManager")) { + assertThat(collector.getReduceTime(), greaterThan(0L)); + } + assertThat(collector.getMaxSliceTime(), greaterThan(0L)); + assertThat(collector.getMinSliceTime(), greaterThan(0L)); + assertThat(collector.getAvgSliceTime(), greaterThan(0L)); + assertThat(collector.getSliceCount(), greaterThanOrEqualTo(1)); + assertThat(collector.getProfiledChildren(), empty()); + }); + reader.close(); + dir.close(); + } + private void assertProfileData(SearchContext context, String type, Consumer<ProfileResult> query, Consumer<CollectorResult> collector) throws IOException { assertProfileData(context, collector, (profileResult) -> { diff --git a/server/src/test/java/org/opensearch/threadpool/ScalingThreadPoolTests.java b/server/src/test/java/org/opensearch/threadpool/ScalingThreadPoolTests.java index 97326377ce245..d8f04a11fe494 100644 --- a/server/src/test/java/org/opensearch/threadpool/ScalingThreadPoolTests.java +++ b/server/src/test/java/org/opensearch/threadpool/ScalingThreadPoolTests.java @@ -155,6 +155,7 @@ private int expectedSize(final String threadPoolName, final int numberOfProcesso sizes.put(ThreadPool.Names.REMOTE_PURGE, ThreadPool::halfAllocatedProcessors); sizes.put(ThreadPool.Names.REMOTE_REFRESH_RETRY, ThreadPool::halfAllocatedProcessors); sizes.put(ThreadPool.Names.REMOTE_RECOVERY, ThreadPool::twiceAllocatedProcessors); + sizes.put(ThreadPool.Names.REMOTE_STATE_READ, ThreadPool::twiceAllocatedProcessors); return sizes.get(threadPoolName).apply(numberOfProcessors); } diff --git a/settings.gradle b/settings.gradle index ca8538a967ef7..a96d00a4ab863 100644 --- a/settings.gradle +++ b/settings.gradle @@ -10,7 +10,7 @@ */ plugins { - id "com.gradle.develocity" version "3.17.4" + id "com.gradle.develocity" version "3.17.5" } ext.disableBuildCache = hasProperty('DISABLE_BUILD_CACHE') || System.getenv().containsKey('DISABLE_BUILD_CACHE') @@ -18,7 +18,6 @@ ext.disableBuildCache = hasProperty('DISABLE_BUILD_CACHE') || System.getenv().co buildCache { local { enabled = !disableBuildCache - removeUnusedEntriesAfterDays = 14 } } diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index ddb876b46fd1c..a532bf0c6287b 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -70,7 +70,7 @@ dependencies { api "org.eclipse.jetty.websocket:javax-websocket-server-impl:${versions.jetty}" api 'org.apache.zookeeper:zookeeper:3.9.2' api "org.apache.commons:commons-text:1.12.0" - api "commons-net:commons-net:3.10.0" + api "commons-net:commons-net:3.11.1" api "ch.qos.logback:logback-core:1.5.6" api "ch.qos.logback:logback-classic:1.2.13" api 'org.apache.kerby:kerb-admin:2.0.3'
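The REMOTE_STATE_READ line above pins the new pool's expected maximum to twice the allocated processors. A sketch of the two sizing rules the test's expectedSize map encodes, as plain arithmetic (the real ThreadPool helpers may apply additional bounds):

// Sketch only: illustrative versions of the sizing functions referenced via
// method handles above; not the actual ThreadPool implementation.
final class PoolSizing {
    static int halfAllocatedProcessors(int allocatedProcessors) {
        return (allocatedProcessors + 1) / 2; // rounds up, so one processor still yields one thread
    }

    static int twiceAllocatedProcessors(int allocatedProcessors) {
        return 2 * allocatedProcessors; // e.g. 8 allocated processors -> max pool size 16
    }
}

diff --git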
a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java index 02e5d22e147d5..50b27ec000615 100644 --- a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java @@ -304,6 +304,20 @@ protected <A extends Aggregator> A createAggregator( return createAggregator(aggregationBuilder, searchContext); } + protected <A extends Aggregator> A createAggregatorWithCustomizableSearchContext( + Query query, + AggregationBuilder aggregationBuilder, + IndexSearcher indexSearcher, + IndexSettings indexSettings, + MultiBucketConsumer bucketConsumer, + Consumer<SearchContext> customizeSearchContext, + MappedFieldType... fieldTypes + ) throws IOException { + SearchContext searchContext = createSearchContext(indexSearcher, indexSettings, query, bucketConsumer, fieldTypes); + customizeSearchContext.accept(searchContext); + return createAggregator(aggregationBuilder, searchContext); + } + protected <A extends Aggregator> A createAggregator(AggregationBuilder aggregationBuilder, SearchContext searchContext) throws IOException { @SuppressWarnings("unchecked")