diff --git a/CHANGELOG.md b/CHANGELOG.md index d91b221e1b216..6cebdd3a623fe 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,11 +21,12 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Support prefix list for remote repository attributes([#16271](https://github.com/opensearch-project/OpenSearch/pull/16271)) - Add new configuration setting `synonym_analyzer`, to the `synonym` and `synonym_graph` filters, enabling the specification of a custom analyzer for reading the synonym file ([#16488](https://github.com/opensearch-project/OpenSearch/pull/16488)). - Add stats for remote publication failure and move download failure stats to remote methods([#16682](https://github.com/opensearch-project/OpenSearch/pull/16682/)) +- Add a precaution to handle extreme date values during sorting to prevent `arithmetic_exception: long overflow` ([#16812](https://github.com/opensearch-project/OpenSearch/pull/16812)). ### Dependencies - Bump `com.google.cloud:google-cloud-core-http` from 2.23.0 to 2.47.0 ([#16504](https://github.com/opensearch-project/OpenSearch/pull/16504)) - Bump `google-auth-library-oauth2-http` from 1.7.0 to 1.29.0 in /plugins/repository-gcs ([#16520](https://github.com/opensearch-project/OpenSearch/pull/16520)) -- Bump `com.azure:azure-storage-common` from 12.25.1 to 12.27.1 ([#16521](https://github.com/opensearch-project/OpenSearch/pull/16521)) +- Bump `com.azure:azure-storage-common` from 12.25.1 to 12.28.0 ([#16521](https://github.com/opensearch-project/OpenSearch/pull/16521), [#16808](https://github.com/opensearch-project/OpenSearch/pull/16808)) - Bump `com.google.apis:google-api-services-compute` from v1-rev20240407-2.0.0 to v1-rev20241105-2.0.0 ([#16502](https://github.com/opensearch-project/OpenSearch/pull/16502), [#16548](https://github.com/opensearch-project/OpenSearch/pull/16548), [#16613](https://github.com/opensearch-project/OpenSearch/pull/16613)) - Bump `com.azure:azure-storage-blob` from 12.23.0 to 12.28.1 ([#16501](https://github.com/opensearch-project/OpenSearch/pull/16501)) - Bump `org.apache.hadoop:hadoop-minicluster` from 3.4.0 to 3.4.1 ([#16550](https://github.com/opensearch-project/OpenSearch/pull/16550)) @@ -40,9 +41,11 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `org.apache.logging.log4j:log4j-core` from 2.24.1 to 2.24.2 ([#16718](https://github.com/opensearch-project/OpenSearch/pull/16718)) - Bump `jackson` from 2.17.2 to 2.18.2 ([#16733](https://github.com/opensearch-project/OpenSearch/pull/16733)) - Bump `ch.qos.logback:logback-classic` from 1.2.13 to 1.5.12 ([#16716](https://github.com/opensearch-project/OpenSearch/pull/16716)) +- Bump `com.azure:azure-identity` from 1.13.2 to 1.14.2 ([#16778](https://github.com/opensearch-project/OpenSearch/pull/16778)) ### Changed - Indexed IP field supports `terms_query` with more than 1025 IP masks [#16391](https://github.com/opensearch-project/OpenSearch/pull/16391) +- Move dependency entries from server/build.gradle to the Gradle version catalog ([#16707](https://github.com/opensearch-project/OpenSearch/pull/16707)) ### Deprecated - Performing update operation with default pipeline or final pipeline is deprecated ([#16712](https://github.com/opensearch-project/OpenSearch/pull/16712)) @@ -61,7 +64,9 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix stale cluster state custom file deletion ([#16670](https://github.com/opensearch-project/OpenSearch/pull/16670)) - [Tiered Caching] Fix bug in cache 
stats API ([#16560](https://github.com/opensearch-project/OpenSearch/pull/16560)) - Bound the size of cache in deprecation logger ([16702](https://github.com/opensearch-project/OpenSearch/issues/16702)) -- Fix _list/shards API failing when closed indices are present in a cluster ([#16606](https://github.com/opensearch-project/OpenSearch/pull/16606)) +- Ensure consistency of system flag on IndexMetadata after diff is applied ([#16644](https://github.com/opensearch-project/OpenSearch/pull/16644)) +- Skip remote-repositories validations for node-joins when RepositoriesService is not in sync with cluster-state ([#16763](https://github.com/opensearch-project/OpenSearch/pull/16763)) +- Fix _list/shards API failing when closed indices are present ([#16606](https://github.com/opensearch-project/OpenSearch/pull/16606)) ### Security diff --git a/buildSrc/src/main/groovy/org/opensearch/gradle/plugin/OptionalDependenciesPlugin.groovy b/buildSrc/src/main/groovy/org/opensearch/gradle/plugin/OptionalDependenciesPlugin.groovy index 2bd8835535881..9b687e1037a08 100644 --- a/buildSrc/src/main/groovy/org/opensearch/gradle/plugin/OptionalDependenciesPlugin.groovy +++ b/buildSrc/src/main/groovy/org/opensearch/gradle/plugin/OptionalDependenciesPlugin.groovy @@ -79,7 +79,7 @@ class OptionalDependenciesPlugin implements Plugin { if (foundDep) { if (foundDep.optional) { - foundDep.optional.value = 'true' + foundDep.optional*.value = 'true' } else { foundDep.appendNode(OPTIONAL_IDENTIFIER, 'true') } diff --git a/buildSrc/src/main/java/org/opensearch/gradle/docker/DockerSupportService.java b/buildSrc/src/main/java/org/opensearch/gradle/docker/DockerSupportService.java index 77d7997d6d48d..b75bdcffb257b 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/docker/DockerSupportService.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/docker/DockerSupportService.java @@ -105,8 +105,7 @@ public DockerAvailability getDockerAvailability() { Result lastResult = null; Version version = null; boolean isVersionHighEnough = false; - boolean isComposeAvailable = false; - boolean isComposeV2Available = false; + DockerComposeAvailability dockerComposeAvailability = null; // Check if the Docker binary exists final Optional dockerBinary = getDockerPath(); @@ -114,7 +113,7 @@ public DockerAvailability getDockerAvailability() { dockerPath = dockerBinary.get(); // Since we use a multi-stage Docker build, check the Docker version meets minimum requirement - lastResult = runCommand(dockerPath, "version", "--format", "{{.Server.Version}}"); + lastResult = runCommand(execOperations, dockerPath, "version", "--format", "{{.Server.Version}}"); if (lastResult.isSuccess()) { version = Version.fromString(lastResult.stdout.trim(), Version.Mode.RELAXED); @@ -123,15 +122,11 @@ public DockerAvailability getDockerAvailability() { if (isVersionHighEnough) { // Check that we can execute a privileged command - lastResult = runCommand(dockerPath, "images"); - + lastResult = runCommand(execOperations, dockerPath, "images"); // If docker all checks out, see if docker-compose is available and working - Optional composePath = getDockerComposePath(); - if (lastResult.isSuccess() && composePath.isPresent()) { - isComposeAvailable = runCommand(composePath.get(), "version").isSuccess(); + if (lastResult.isSuccess()) { + dockerComposeAvailability = DockerComposeAvailability.detect(execOperations, dockerPath).orElse(null); } - - isComposeV2Available = runCommand(dockerPath, "compose", "version").isSuccess(); } } } @@ -140,8 +135,7 @@ public 
DockerAvailability getDockerAvailability() { this.dockerAvailability = new DockerAvailability( isAvailable, - isComposeAvailable, - isComposeV2Available, + dockerComposeAvailability, isVersionHighEnough, dockerPath, version, @@ -291,17 +285,6 @@ private Optional getDockerPath() { return Arrays.asList(DOCKER_BINARIES).stream().filter(path -> new File(path).exists()).findFirst(); } - /** - * Searches the entries in {@link #DOCKER_COMPOSE_BINARIES} for the Docker Compose CLI. This method does - * not check whether the installation appears usable, see {@link #getDockerAvailability()} instead. - * - * @return the path to a CLI, if available. - */ - private Optional getDockerComposePath() { - // Check if the Docker binary exists - return Arrays.asList(DOCKER_COMPOSE_BINARIES).stream().filter(path -> new File(path).exists()).findFirst(); - } - private void throwDockerRequiredException(final String message) { throwDockerRequiredException(message, null); } @@ -321,7 +304,7 @@ private void throwDockerRequiredException(final String message, Exception e) { * while running the command, or the process was killed after reaching the 10s timeout, * then the exit code will be -1. */ - private Result runCommand(String... args) { + private static Result runCommand(ExecOperations execOperations, String... args) { if (args.length == 0) { throw new IllegalArgumentException("Cannot execute with no command"); } @@ -356,14 +339,9 @@ public static class DockerAvailability { public final boolean isAvailable; /** - * True if docker-compose is available. + * Non-null if docker-compose v1 or v2 is available. */ - public final boolean isComposeAvailable; - - /** - * True if docker compose is available. - */ - public final boolean isComposeV2Available; + public final DockerComposeAvailability dockerComposeAvailability; /** * True if the installed Docker version is >= 17.05 @@ -387,23 +365,70 @@ public static class DockerAvailability { DockerAvailability( boolean isAvailable, - boolean isComposeAvailable, - boolean isComposeV2Available, + DockerComposeAvailability dockerComposeAvailability, boolean isVersionHighEnough, String path, Version version, Result lastCommand ) { this.isAvailable = isAvailable; - this.isComposeAvailable = isComposeAvailable; - this.isComposeV2Available = isComposeV2Available; + this.dockerComposeAvailability = dockerComposeAvailability; this.isVersionHighEnough = isVersionHighEnough; this.path = path; this.version = version; this.lastCommand = lastCommand; } + + public boolean isDockerComposeAvailable() { + return dockerComposeAvailability != null; + } + } + + /** + * Marker interface for Docker Compose availability + */ + private interface DockerComposeAvailability { + /** + * Detects Docker Compose V1/V2 availability + */ + private static Optional detect(ExecOperations execOperations, String dockerPath) { + Optional composePath = getDockerComposePath(); + if (composePath.isPresent()) { + if (runCommand(execOperations, composePath.get(), "version").isSuccess()) { + return Optional.of(new DockerComposeV1Availability()); + } + } + + if (runCommand(execOperations, dockerPath, "compose", "version").isSuccess()) { + return Optional.of(new DockerComposeV2Availability()); + } + + return Optional.empty(); + } + + /** + * Searches the entries in {@link #DOCKER_COMPOSE_BINARIES} for the Docker Compose CLI. This method does + * not check whether the installation appears usable, see {@link #getDockerAvailability()} instead. + * + * @return the path to a CLI, if available. 
+ */ + private static Optional getDockerComposePath() { + // Check if the Docker binary exists + return Arrays.asList(DOCKER_COMPOSE_BINARIES).stream().filter(path -> new File(path).exists()).findFirst(); + } + } + /** + * Docker Compose V1 availability + */ + public static class DockerComposeV1Availability implements DockerComposeAvailability {} + + /** + * Docker Compose V2 availability + */ + public static class DockerComposeV2Availability implements DockerComposeAvailability {} + /** * This class models the result of running a command. It captures the exit code, standard output and standard error. */ diff --git a/buildSrc/src/main/java/org/opensearch/gradle/testfixtures/TestFixturesPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/testfixtures/TestFixturesPlugin.java index f65e231cd2e50..79b5f837c75ce 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/testfixtures/TestFixturesPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/testfixtures/TestFixturesPlugin.java @@ -43,6 +43,7 @@ import org.opensearch.gradle.SystemPropertyCommandLineArgumentProvider; import org.opensearch.gradle.docker.DockerSupportPlugin; import org.opensearch.gradle.docker.DockerSupportService; +import org.opensearch.gradle.docker.DockerSupportService.DockerComposeV2Availability; import org.opensearch.gradle.info.BuildParams; import org.opensearch.gradle.precommit.TestingConventionsTasks; import org.opensearch.gradle.util.GradleUtils; @@ -171,11 +172,8 @@ public void execute(Task task) { .findFirst(); composeExtension.getExecutable().set(dockerCompose.isPresent() ? dockerCompose.get() : "/usr/bin/docker"); - if (dockerSupport.get().getDockerAvailability().isComposeV2Available) { - composeExtension.getUseDockerComposeV2().set(true); - } else if (dockerSupport.get().getDockerAvailability().isComposeAvailable) { - composeExtension.getUseDockerComposeV2().set(false); - } + composeExtension.getUseDockerComposeV2() + .set(dockerSupport.get().getDockerAvailability().dockerComposeAvailability instanceof DockerComposeV2Availability); tasks.named("composeUp").configure(t -> { // Avoid running docker-compose tasks in parallel in CI due to some issues on certain Linux distributions @@ -232,8 +230,7 @@ private void maybeSkipTask(Provider dockerSupport, TaskPro private void maybeSkipTask(Provider dockerSupport, Task task) { task.onlyIf(spec -> { - boolean isComposeAvailable = dockerSupport.get().getDockerAvailability().isComposeV2Available - || dockerSupport.get().getDockerAvailability().isComposeAvailable; + boolean isComposeAvailable = dockerSupport.get().getDockerAvailability().isDockerComposeAvailable(); if (isComposeAvailable == false) { LOGGER.info("Task {} requires docker-compose but it is unavailable. 
Task will be skipped.", task.getPath()); } diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml index 2658d2aa8b561..be59e1d3a5ab6 100644 --- a/gradle/libs.versions.toml +++ b/gradle/libs.versions.toml @@ -34,6 +34,7 @@ jna = "5.13.0" netty = "4.1.115.Final" joda = "2.12.7" +roaringbitmap = "1.3.0" # project reactor reactor_netty = "1.1.23" @@ -83,3 +84,52 @@ opentelemetrysemconv = "1.27.0-alpha" # arrow dependencies arrow = "17.0.0" flatbuffers = "2.0.0" + +[libraries] +hdrhistogram = { group = "org.hdrhistogram", name = "HdrHistogram", version.ref = "hdrhistogram" } +jakartaannotation = { group = "jakarta.annotation", name = "jakarta.annotation-api", version.ref = "jakarta_annotation" } +jodatime = { group = "joda-time", name = "joda-time", version.ref = "joda" } +jna = { group = "net.java.dev.jna", name = "jna", version.ref = "jna" } +jtscore = { group = "org.locationtech.jts", name = "jts-core", version.ref = "jts" } +jzlib = { group = "com.jcraft", name = "jzlib", version.ref = "jzlib" } +log4japi = { group = "org.apache.logging.log4j", name = "log4j-api", version.ref = "log4j" } +log4jjul = { group = "org.apache.logging.log4j", name = "log4j-jul", version.ref = "log4j" } +log4jcore = { group = "org.apache.logging.log4j", name = "log4j-core", version.ref = "log4j" } +lucene-core = { group = "org.apache.lucene", name = "lucene-core", version.ref = "lucene" } +lucene-analysis-common = { group = "org.apache.lucene", name = "lucene-analysis-common", version.ref = "lucene" } +lucene-backward-codecs = { group = "org.apache.lucene", name = "lucene-backward-codecs", version.ref = "lucene" } +lucene-grouping = { group = "org.apache.lucene", name = "lucene-grouping", version.ref = "lucene" } +lucene-highlighter = { group = "org.apache.lucene", name = "lucene-highlighter", version.ref = "lucene" } +lucene-join = { group = "org.apache.lucene", name = "lucene-join", version.ref = "lucene" } +lucene-memory = { group = "org.apache.lucene", name = "lucene-memory", version.ref = "lucene" } +lucene-misc = { group = "org.apache.lucene", name = "lucene-misc", version.ref = "lucene" } +lucene-queries = { group = "org.apache.lucene", name = "lucene-queries", version.ref = "lucene" } +lucene-queryparser = { group = "org.apache.lucene", name = "lucene-queryparser", version.ref = "lucene" } +lucene-sandbox = { group = "org.apache.lucene", name = "lucene-sandbox", version.ref = "lucene" } +lucene-spatial-extras = { group = "org.apache.lucene", name = "lucene-spatial-extras", version.ref = "lucene" } +lucene-spatial3d = { group = "org.apache.lucene", name = "lucene-spatial3d", version.ref = "lucene" } +lucene-suggest = { group = "org.apache.lucene", name = "lucene-suggest", version.ref = "lucene" } +protobuf = { group = "com.google.protobuf", name = "protobuf-java", version.ref = "protobuf" } +reactivestreams = { group = "io.projectreactor", name = "reactor-core", version.ref = "reactor" } +reactorcore = { group = "org.reactivestreams", name = "reactive-streams", version.ref = "reactivestreams" } +roaringbitmap = { group = "org.roaringbitmap", name = "RoaringBitmap", version.ref = "roaringbitmap" } +spatial4j = { group = "org.locationtech.spatial4j", name = "spatial4j", version.ref = "spatial4j" } +tdigest = { group = "com.tdunning", name = "t-digest", version.ref = "tdigest" } + +[bundles] +lucene = [ + "lucene-core", + "lucene-analysis-common", + "lucene-backward-codecs", + "lucene-grouping", + "lucene-highlighter", + "lucene-join", + "lucene-memory", + "lucene-misc", + "lucene-queries", + 
"lucene-queryparser", + "lucene-sandbox", + "lucene-spatial-extras", + "lucene-spatial3d", + "lucene-suggest" +] diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index efcd01d2bad5e..d419f6fafeb30 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -47,7 +47,7 @@ dependencies { api 'com.azure:azure-core:1.51.0' api 'com.azure:azure-json:1.3.0' api 'com.azure:azure-xml:1.1.0' - api 'com.azure:azure-storage-common:12.27.1' + api 'com.azure:azure-storage-common:12.28.0' api 'com.azure:azure-core-http-netty:1.15.5' api "io.netty:netty-codec-dns:${versions.netty}" api "io.netty:netty-codec-socks:${versions.netty}" @@ -57,7 +57,7 @@ dependencies { api "io.netty:netty-transport-native-unix-common:${versions.netty}" implementation project(':modules:transport-netty4') api 'com.azure:azure-storage-blob:12.28.1' - api 'com.azure:azure-identity:1.13.2' + api 'com.azure:azure-identity:1.14.2' // Start of transitive dependencies for azure-identity api 'com.microsoft.azure:msal4j-persistence-extension:1.3.0' api "net.java.dev.jna:jna-platform:${versions.jna}" @@ -108,6 +108,7 @@ thirdPartyAudit { // Optional and not enabled by Elasticsearch 'com.google.common.util.concurrent.internal.InternalFutureFailureAccess', 'com.google.common.util.concurrent.internal.InternalFutures', + 'com.azure.core.credential.ProofOfPossessionOptions', 'com.azure.storage.internal.avro.implementation.AvroObject', 'com.azure.storage.internal.avro.implementation.AvroReader', 'com.azure.storage.internal.avro.implementation.AvroReaderFactory', diff --git a/plugins/repository-azure/licenses/azure-identity-1.13.2.jar.sha1 b/plugins/repository-azure/licenses/azure-identity-1.13.2.jar.sha1 deleted file mode 100644 index 7c98a9ccba592..0000000000000 --- a/plugins/repository-azure/licenses/azure-identity-1.13.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -50a1daef3eb5c6ab2e1351a3e3f5a7649a8fe464 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-identity-1.14.2.jar.sha1 b/plugins/repository-azure/licenses/azure-identity-1.14.2.jar.sha1 new file mode 100644 index 0000000000000..7ffc775aea847 --- /dev/null +++ b/plugins/repository-azure/licenses/azure-identity-1.14.2.jar.sha1 @@ -0,0 +1 @@ +85c45e2add38742009a9c5070d2a9d8f192cf8db \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-storage-common-12.27.1.jar.sha1 b/plugins/repository-azure/licenses/azure-storage-common-12.27.1.jar.sha1 deleted file mode 100644 index d7602da1418d1..0000000000000 --- a/plugins/repository-azure/licenses/azure-storage-common-12.27.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c477c5d8c0f2076da1c5345c1097be6a319fe7c4 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-storage-common-12.28.0.jar.sha1 b/plugins/repository-azure/licenses/azure-storage-common-12.28.0.jar.sha1 new file mode 100644 index 0000000000000..ed932cd0a07e9 --- /dev/null +++ b/plugins/repository-azure/licenses/azure-storage-common-12.28.0.jar.sha1 @@ -0,0 +1 @@ +3c5b7de96c68947ab74cc7925b27ca2b9f6b91d0 \ No newline at end of file diff --git a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java index 970388498ee26..c7eae3eaa220b 100644 --- a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java +++ 
b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java @@ -88,6 +88,7 @@ import fixture.azure.AzureHttpHandler; import reactor.core.scheduler.Schedulers; +import reactor.netty.http.HttpResources; import static java.nio.charset.StandardCharsets.UTF_8; import static org.opensearch.repositories.azure.AzureRepository.Repository.CONTAINER_SETTING; @@ -142,6 +143,7 @@ public void tearDown() throws Exception { @AfterClass public static void shutdownSchedulers() { + HttpResources.disposeLoopsAndConnections(); Schedulers.shutdownNow(); } diff --git a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureRepositorySettingsTests.java b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureRepositorySettingsTests.java index 3356e5174592a..0433a13baec2c 100644 --- a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureRepositorySettingsTests.java +++ b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureRepositorySettingsTests.java @@ -49,6 +49,7 @@ import java.util.List; import reactor.core.scheduler.Schedulers; +import reactor.netty.http.HttpResources; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; @@ -57,6 +58,7 @@ public class AzureRepositorySettingsTests extends OpenSearchTestCase { @AfterClass public static void shutdownSchedulers() { + HttpResources.disposeLoopsAndConnections(); Schedulers.shutdownNow(); } diff --git a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureStorageServiceTests.java b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureStorageServiceTests.java index ea74a49e593cf..324a20c9030c6 100644 --- a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureStorageServiceTests.java +++ b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureStorageServiceTests.java @@ -58,6 +58,7 @@ import java.util.Map; import reactor.core.scheduler.Schedulers; +import reactor.netty.http.HttpResources; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.emptyString; @@ -69,6 +70,7 @@ public class AzureStorageServiceTests extends OpenSearchTestCase { @AfterClass public static void shutdownSchedulers() { + HttpResources.disposeLoopsAndConnections(); Schedulers.shutdownNow(); } diff --git a/server/build.gradle b/server/build.gradle index f1679ccfbec30..8dd23491ccd69 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -74,60 +74,46 @@ dependencies { compileOnly project(':libs:opensearch-plugin-classloader') testRuntimeOnly project(':libs:opensearch-plugin-classloader') - // lucene - api "org.apache.lucene:lucene-core:${versions.lucene}" - api "org.apache.lucene:lucene-analysis-common:${versions.lucene}" - api "org.apache.lucene:lucene-backward-codecs:${versions.lucene}" - api "org.apache.lucene:lucene-grouping:${versions.lucene}" - api "org.apache.lucene:lucene-highlighter:${versions.lucene}" - api "org.apache.lucene:lucene-join:${versions.lucene}" - api "org.apache.lucene:lucene-memory:${versions.lucene}" - api "org.apache.lucene:lucene-misc:${versions.lucene}" - api "org.apache.lucene:lucene-queries:${versions.lucene}" - api "org.apache.lucene:lucene-queryparser:${versions.lucene}" - api "org.apache.lucene:lucene-sandbox:${versions.lucene}" - api "org.apache.lucene:lucene-spatial-extras:${versions.lucene}" - api 
"org.apache.lucene:lucene-spatial3d:${versions.lucene}" - api "org.apache.lucene:lucene-suggest:${versions.lucene}" + api libs.bundles.lucene // utilities api project(":libs:opensearch-cli") // time handling, remove with java 8 time - api "joda-time:joda-time:${versions.joda}" + api libs.jodatime // percentiles aggregation - api "com.tdunning:t-digest:${versions.tdigest}" + api libs.tdigest // percentile ranks aggregation - api "org.hdrhistogram:HdrHistogram:${versions.hdrhistogram}" + api libs.hdrhistogram // lucene spatial - api "org.locationtech.spatial4j:spatial4j:${versions.spatial4j}", optional - api "org.locationtech.jts:jts-core:${versions.jts}", optional + api libs.spatial4j, optional + api libs.jtscore, optional // logging - api "org.apache.logging.log4j:log4j-api:${versions.log4j}" - api "org.apache.logging.log4j:log4j-jul:${versions.log4j}" - api "org.apache.logging.log4j:log4j-core:${versions.log4j}", optional - annotationProcessor "org.apache.logging.log4j:log4j-core:${versions.log4j}" + api libs.log4japi + api libs.log4jjul + api libs.log4jcore, optional + annotationProcessor libs.log4jcore annotationProcessor project(':libs:opensearch-common') // jna - api "net.java.dev.jna:jna:${versions.jna}" + api libs.jna // jcraft - api "com.jcraft:jzlib:${versions.jzlib}" + api libs.jzlib // reactor - api "io.projectreactor:reactor-core:${versions.reactor}" - api "org.reactivestreams:reactive-streams:${versions.reactivestreams}" + api libs.reactorcore + api libs.reactivestreams // protobuf - api "com.google.protobuf:protobuf-java:${versions.protobuf}" - api "jakarta.annotation:jakarta.annotation-api:${versions.jakarta_annotation}" + api libs.protobuf + api libs.jakartaannotation // https://mvnrepository.com/artifact/org.roaringbitmap/RoaringBitmap - implementation 'org.roaringbitmap:RoaringBitmap:1.3.0' + api libs.roaringbitmap testImplementation(project(":test:framework")) { // tests use the locally compiled version of server diff --git a/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java index 70124c8c46700..377f99cd8b791 100644 --- a/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java @@ -33,12 +33,21 @@ package org.opensearch.discovery; import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.coordination.FailedToCommitClusterStateException; import org.opensearch.cluster.coordination.JoinHelper; +import org.opensearch.cluster.coordination.PersistedStateRegistry; import org.opensearch.cluster.coordination.PublicationTransportHandler; +import org.opensearch.cluster.metadata.RepositoriesMetadata; +import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.Randomness; import org.opensearch.common.settings.Settings; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.Repository; +import org.opensearch.repositories.RepositoryMissingException; +import org.opensearch.repositories.fs.ReloadableFsRepository; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.disruption.NetworkDisruption; import org.opensearch.test.disruption.ServiceDisruptionScheme; @@ -46,10 +55,15 @@ 
import org.opensearch.test.transport.MockTransportService; import org.opensearch.transport.Transport; import org.opensearch.transport.TransportService; +import org.junit.Assert; +import java.util.Arrays; import java.util.HashSet; +import java.util.List; +import java.util.Objects; import java.util.Set; import java.util.concurrent.CountDownLatch; +import java.util.stream.Collectors; import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING; import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING; @@ -250,4 +264,142 @@ public void testNodeNotReachableFromClusterManager() throws Exception { ensureStableCluster(3); } + /** + * Tests the scenario wherein a cluster-state containing new repository metadata, as part of a node-join from a + * repository-configured node, fails at the commit stage and is followed by a cluster-manager switch. This forces the new + * cluster-manager to do another round of node-joins with the new cluster-state, since the previous attempt had a successful publish. + */ + public void testElectClusterManagerRemotePublicationConfigurationNodeJoinCommitFails() throws Exception { + final String remoteStateRepoName = "remote-state-repo"; + final String remoteRoutingTableRepoName = "routing-table-repo"; + + Settings remotePublicationSettings = buildRemotePublicationNodeAttributes( + remoteStateRepoName, + ReloadableFsRepository.TYPE, + remoteRoutingTableRepoName, + ReloadableFsRepository.TYPE + ); + internalCluster().startClusterManagerOnlyNodes(3); + internalCluster().startDataOnlyNodes(3); + + String clusterManagerNode = internalCluster().getClusterManagerName(); + List nonClusterManagerNodes = Arrays.stream(internalCluster().getNodeNames()) + .filter(node -> !node.equals(clusterManagerNode)) + .collect(Collectors.toList()); + + ensureStableCluster(6); + + MockTransportService clusterManagerTransportService = (MockTransportService) internalCluster().getInstance( + TransportService.class, + clusterManagerNode + ); + logger.info("Blocking Cluster Manager Commit Request on all nodes"); + // This is to allow the new node's join to hit commit failures on the nodes in the send path itself. This will lead to the + // nodes having a successful publish operation but a failed commit operation. This will come into play once the new node joins + nonClusterManagerNodes.forEach(node -> { + TransportService targetTransportService = internalCluster().getInstance(TransportService.class, node); + clusterManagerTransportService.addSendBehavior(targetTransportService, (connection, requestId, action, request, options) -> { + if (action.equals(PublicationTransportHandler.COMMIT_STATE_ACTION_NAME)) { + logger.info("--> preventing {} request", PublicationTransportHandler.COMMIT_STATE_ACTION_NAME); + throw new FailedToCommitClusterStateException("Blocking Commit"); + } + connection.sendRequest(requestId, action, request, options); + }); + }); + + logger.info("Starting Node with remote publication settings"); + // Start a node with remote-publication repositories configured. This will lead the active cluster-manager to create + // a new cluster-state event for the new node-join, along with the new repositories set up in the cluster metadata. 
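+ // Note: Boolean.TRUE below maps to the new waitForNodeJoin flag on InternalTestCluster#startDataOnlyNodes + // (added later in this change); it skips the cluster-formed validation so this call can return while the + // join stalls on the blocked commit path.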
+ internalCluster().startDataOnlyNodes(1, remotePublicationSettings, Boolean.TRUE); + + // Checking if publish succeeded in the nodes before shutting down the blocked cluster-manager + assertBusy(() -> { + String randomNode = nonClusterManagerNodes.get(Randomness.get().nextInt(nonClusterManagerNodes.size())); + PersistedStateRegistry registry = internalCluster().getInstance(PersistedStateRegistry.class, randomNode); + + ClusterState state = registry.getPersistedState(PersistedStateRegistry.PersistedStateType.LOCAL).getLastAcceptedState(); + RepositoriesMetadata repositoriesMetadata = state.metadata().custom(RepositoriesMetadata.TYPE); + Boolean isRemoteStateRepoConfigured = Boolean.FALSE; + Boolean isRemoteRoutingTableRepoConfigured = Boolean.FALSE; + + assertNotNull(repositoriesMetadata); + assertNotNull(repositoriesMetadata.repositories()); + + for (RepositoryMetadata repo : repositoriesMetadata.repositories()) { + if (repo.name().equals(remoteStateRepoName)) { + isRemoteStateRepoConfigured = Boolean.TRUE; + } else if (repo.name().equals(remoteRoutingTableRepoName)) { + isRemoteRoutingTableRepoConfigured = Boolean.TRUE; + } + } + // Asserting that the metadata is present in the persisted cluster-state + assertTrue(isRemoteStateRepoConfigured); + assertTrue(isRemoteRoutingTableRepoConfigured); + + RepositoriesService repositoriesService = internalCluster().getInstance(RepositoriesService.class, randomNode); + + isRemoteStateRepoConfigured = isRepoPresentInRepositoryService(repositoriesService, remoteStateRepoName); + isRemoteRoutingTableRepoConfigured = isRepoPresentInRepositoryService(repositoriesService, remoteRoutingTableRepoName); + + // Asserting that the metadata is not present in the repository service. + Assert.assertFalse(isRemoteStateRepoConfigured); + Assert.assertFalse(isRemoteRoutingTableRepoConfigured); + }); + + logger.info("Stopping current Cluster Manager"); + // We stop the current cluster-manager whose outbound paths were blocked. This forces a new election among the nodes + // where the new cluster-state was published but not committed. + internalCluster().stopCurrentClusterManagerNode(); + + // We expect the repositories validations to be skipped in this case and node-joins to succeed as expected. The + // repositories validations are skipped because even though the cluster-state is updated in the persisted registry, + // the repository service will not be updated as the commit attempt failed. + ensureStableCluster(6); + + String randomNode = nonClusterManagerNodes.get(Randomness.get().nextInt(nonClusterManagerNodes.size())); + + // Checking if the final cluster-state is updated. 
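+ // With the blocked cluster-manager stopped, the newly elected cluster-manager re-attempts the node-join and + // commits it, so the cluster-state and the RepositoriesService on the surviving nodes should now both report + // the remote-state and routing-table repositories.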
+ RepositoriesMetadata repositoriesMetadata = internalCluster().getInstance(ClusterService.class, randomNode) + .state() + .metadata() + .custom(RepositoriesMetadata.TYPE); + + Boolean isRemoteStateRepoConfigured = Boolean.FALSE; + Boolean isRemoteRoutingTableRepoConfigured = Boolean.FALSE; + + for (RepositoryMetadata repo : repositoriesMetadata.repositories()) { + if (repo.name().equals(remoteStateRepoName)) { + isRemoteStateRepoConfigured = Boolean.TRUE; + } else if (repo.name().equals(remoteRoutingTableRepoName)) { + isRemoteRoutingTableRepoConfigured = Boolean.TRUE; + } + } + + Assert.assertTrue("RemoteState Repo is not set in RepositoriesMetadata", isRemoteStateRepoConfigured); + Assert.assertTrue("RemoteRoutingTable Repo is not set in RepositoriesMetadata", isRemoteRoutingTableRepoConfigured); + + RepositoriesService repositoriesService = internalCluster().getInstance(RepositoriesService.class, randomNode); + + isRemoteStateRepoConfigured = isRepoPresentInRepositoryService(repositoriesService, remoteStateRepoName); + isRemoteRoutingTableRepoConfigured = isRepoPresentInRepositoryService(repositoriesService, remoteRoutingTableRepoName); + + Assert.assertTrue("RemoteState Repo is not set in RepositoryService", isRemoteStateRepoConfigured); + Assert.assertTrue("RemoteRoutingTable Repo is not set in RepositoryService", isRemoteRoutingTableRepoConfigured); + } + + private Boolean isRepoPresentInRepositoryService(RepositoriesService repositoriesService, String repoName) { + try { + Repository remoteStateRepo = repositoriesService.repository(repoName); + if (Objects.nonNull(remoteStateRepo)) { + return Boolean.TRUE; + } + } catch (RepositoryMissingException e) { + return Boolean.FALSE; + } + + return Boolean.FALSE; + } + } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index c8ea5442a0dd0..f70282986ad4e 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -1061,7 +1061,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws * * @opensearch.internal */ - private static class IndexMetadataDiff implements Diff { + static class IndexMetadataDiff implements Diff { private final String index; private final int routingNumShards; @@ -1178,7 +1178,7 @@ public IndexMetadata apply(IndexMetadata part) { builder.customMetadata.putAll(customData.apply(part.customData)); builder.inSyncAllocationIds.putAll(inSyncAllocationIds.apply(part.inSyncAllocationIds)); builder.rolloverInfos.putAll(rolloverInfos.apply(part.rolloverInfos)); - builder.system(part.isSystem); + builder.system(isSystem); builder.context(context); return builder.build(); } diff --git a/server/src/main/java/org/opensearch/common/time/DateUtils.java b/server/src/main/java/org/opensearch/common/time/DateUtils.java index 7ab395a1117e7..e5a019b58f7da 100644 --- a/server/src/main/java/org/opensearch/common/time/DateUtils.java +++ b/server/src/main/java/org/opensearch/common/time/DateUtils.java @@ -272,6 +272,30 @@ public static Instant clampToNanosRange(Instant instant) { return instant; } + static final Instant INSTANT_LONG_MIN_VALUE = Instant.ofEpochMilli(Long.MIN_VALUE); + static final Instant INSTANT_LONG_MAX_VALUE = Instant.ofEpochMilli(Long.MAX_VALUE); + + /** + * Clamps the given {@link Instant} to the valid epoch millisecond range. 
+ * + * - If the input is before {@code Instant.ofEpochMilli(Long.MIN_VALUE)}, it returns {@code Instant.ofEpochMilli(Long.MIN_VALUE)}. + * - If the input is after {@code Instant.ofEpochMilli(Long.MAX_VALUE)}, it returns {@code Instant.ofEpochMilli(Long.MAX_VALUE)}. + * - Otherwise, it returns the input as-is. + * + * @param instant the {@link Instant} to clamp + * @return the clamped {@link Instant} + * @throws NullPointerException if the input is {@code null} + */ + public static Instant clampToMillisRange(Instant instant) { + if (instant.isBefore(INSTANT_LONG_MIN_VALUE)) { + return INSTANT_LONG_MIN_VALUE; + } + if (instant.isAfter(INSTANT_LONG_MAX_VALUE)) { + return INSTANT_LONG_MAX_VALUE; + } + return instant; + } + /** * convert a long value to a java time instant the long value resembles the nanoseconds since the epoch diff --git a/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java index 7fbb38c47572c..effee53d7cf63 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java @@ -122,7 +122,7 @@ public enum Resolution { MILLISECONDS(CONTENT_TYPE, NumericType.DATE) { @Override public long convert(Instant instant) { - return instant.toEpochMilli(); + return clampToValidRange(instant).toEpochMilli(); } @Override @@ -132,7 +132,7 @@ public Instant toInstant(long value) { @Override public Instant clampToValidRange(Instant instant) { - return instant; + return DateUtils.clampToMillisRange(instant); } @Override diff --git a/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeService.java b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeService.java index c1c041ce01198..fb97cf40d90d6 100644 --- a/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeService.java +++ b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeService.java @@ -21,6 +21,7 @@ import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; import org.opensearch.repositories.RepositoryException; +import org.opensearch.repositories.RepositoryMissingException; import org.opensearch.threadpool.ThreadPool; import java.util.ArrayList; @@ -183,6 +184,20 @@ public RepositoriesMetadata updateRepositoriesMetadata(DiscoveryNode joiningNode boolean repositoryAlreadyPresent = false; for (RepositoryMetadata existingRepositoryMetadata : existingRepositories.repositories()) { if (newRepositoryMetadata.name().equals(existingRepositoryMetadata.name())) { + try { + // This handles cases where, during a previous node-join attempt, the publish operation succeeded + // but the commit operation failed: the cluster-state may contain repository metadata that was never + // applied to the repository service, which may lead to assertion failures down the line. + repositoriesService.get().repository(newRepositoryMetadata.name()); + } catch (RepositoryMissingException e) { + logger.warn( + "Skipping repositories metadata checks: Remote repository [{}] is in the cluster state but not present " + + "in the repository service.", + newRepositoryMetadata.name() + ); + break; + } + + try { + // This will help in handling two scenarios - + // 1. 
When a fresh cluster is formed and a node tries to join the cluster, the repository diff --git a/server/src/main/java/org/opensearch/repositories/RepositoriesService.java b/server/src/main/java/org/opensearch/repositories/RepositoriesService.java index 9aec81536dbd0..49065be0abb25 100644 --- a/server/src/main/java/org/opensearch/repositories/RepositoriesService.java +++ b/server/src/main/java/org/opensearch/repositories/RepositoriesService.java @@ -80,6 +80,7 @@ import java.util.Collections; import java.util.HashMap; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.Set; @@ -904,6 +905,12 @@ public void ensureValidSystemRepositoryUpdate(RepositoryMetadata newRepositoryMe Settings newRepositoryMetadataSettings = newRepositoryMetadata.settings(); Settings currentRepositoryMetadataSettings = currentRepositoryMetadata.settings(); + assert Objects.nonNull(repository) : String.format( + Locale.ROOT, + "repository [%s] not present in RepositoryService", + currentRepositoryMetadata.name() + ); + List restrictedSettings = repository.getRestrictedSystemRepositorySettings() .stream() .map(setting -> setting.getKey()) diff --git a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java index f6fb203bfe1a9..9590e5615d451 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java @@ -55,6 +55,7 @@ import org.opensearch.common.util.FeatureFlags; import org.opensearch.node.remotestore.RemoteStoreNodeService; import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.RepositoryMissingException; import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; @@ -1378,6 +1379,72 @@ public void testJoinRemoteStoreClusterWithRemotePublicationNodeInMixedMode() { JoinTaskExecutor.ensureNodesCompatibility(joiningNode, currentState.getNodes(), currentState.metadata()); } + public void testUpdatesClusterStateWithRepositoryMetadataNotInSync() throws Exception { + Map newNodeAttributes = new HashMap<>(); + newNodeAttributes.putAll(remoteStateNodeAttributes(CLUSTER_STATE_REPO)); + newNodeAttributes.putAll(remoteRoutingTableAttributes(ROUTING_TABLE_REPO)); + + final AllocationService allocationService = mock(AllocationService.class); + when(allocationService.adaptAutoExpandReplicas(any())).then(invocationOnMock -> invocationOnMock.getArguments()[0]); + final RerouteService rerouteService = (reason, priority, listener) -> listener.onResponse(null); + RepositoriesService repositoriesService = mock(RepositoriesService.class); + when(repositoriesService.repository(any())).thenThrow(RepositoryMissingException.class); + final RemoteStoreNodeService remoteStoreNodeService = new RemoteStoreNodeService(new SetOnce<>(repositoriesService)::get, null); + + final JoinTaskExecutor joinTaskExecutor = new JoinTaskExecutor( + Settings.EMPTY, + allocationService, + logger, + rerouteService, + remoteStoreNodeService + ); + + final DiscoveryNode clusterManagerNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + newNodeAttributes, + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + + final RepositoryMetadata clusterStateRepo = buildRepositoryMetadata(clusterManagerNode, 
CLUSTER_STATE_REPO); + final RepositoryMetadata routingTableRepo = buildRepositoryMetadata(clusterManagerNode, ROUTING_TABLE_REPO); + List repositoriesMetadata = new ArrayList<>() { + { + add(clusterStateRepo); + add(routingTableRepo); + } + }; + + final ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) + .nodes( + DiscoveryNodes.builder() + .add(clusterManagerNode) + .localNodeId(clusterManagerNode.getId()) + .clusterManagerNodeId(clusterManagerNode.getId()) + ) + .metadata(Metadata.builder().putCustom(RepositoriesMetadata.TYPE, new RepositoriesMetadata(repositoriesMetadata))) + .build(); + + final DiscoveryNode joiningNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + newNodeAttributes, + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + + final ClusterStateTaskExecutor.ClusterTasksResult result = joinTaskExecutor.execute( + clusterState, + List.of(new JoinTaskExecutor.Task(joiningNode, "test")) + ); + assertThat(result.executionResults.entrySet(), hasSize(1)); + final ClusterStateTaskExecutor.TaskResult taskResult = result.executionResults.values().iterator().next(); + assertTrue(taskResult.isSuccess()); + validatePublicationRepositoryMetadata(result.resultingState, clusterManagerNode); + + } + private void validateRepositoryMetadata(ClusterState updatedState, DiscoveryNode existingNode, int expectedRepositories) throws Exception { diff --git a/server/src/test/java/org/opensearch/cluster/metadata/IndexMetadataTests.java b/server/src/test/java/org/opensearch/cluster/metadata/IndexMetadataTests.java index 92988ab7e9cba..67f9c70b8281e 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/IndexMetadataTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/IndexMetadataTests.java @@ -32,10 +32,13 @@ package org.opensearch.cluster.metadata; +import org.opensearch.Version; import org.opensearch.action.admin.indices.rollover.MaxAgeCondition; import org.opensearch.action.admin.indices.rollover.MaxDocsCondition; import org.opensearch.action.admin.indices.rollover.MaxSizeCondition; import org.opensearch.action.admin.indices.rollover.RolloverInfo; +import org.opensearch.cluster.Diff; +import org.opensearch.common.UUIDs; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; @@ -48,6 +51,7 @@ import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; @@ -88,6 +92,26 @@ protected NamedXContentRegistry xContentRegistry() { return new NamedXContentRegistry(IndicesModule.getNamedXContents()); } + // Create the index metadata for a given index, with the specified version. 
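+ // (The three-argument overload that follows also sets the system flag; testIndicesMetadataDiffSystemFlagFlipped + // relies on it to flip isSystem between two versions of the same index.)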
+ private static IndexMetadata createIndexMetadata(final Index index, final long version) { + return createIndexMetadata(index, version, false); + } + + private static IndexMetadata createIndexMetadata(final Index index, final long version, final boolean isSystem) { + final Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()) + .build(); + return IndexMetadata.builder(index.getName()) + .settings(settings) + .numberOfShards(1) + .numberOfReplicas(0) + .creationDate(System.currentTimeMillis()) + .version(version) + .system(isSystem) + .build(); + } + public void testIndexMetadataSerialization() throws IOException { Integer numShard = randomFrom(1, 2, 4, 8, 16); int numberOfReplicas = randomIntBetween(0, 10); @@ -568,4 +592,18 @@ public void testParseIndexNameCannotFormatNumber() { } } + /** + * Test that changes to indices metadata are applied + */ + public void testIndicesMetadataDiffSystemFlagFlipped() { + String indexUuid = UUIDs.randomBase64UUID(); + Index index = new Index("test-index", indexUuid); + IndexMetadata previousIndexMetadata = createIndexMetadata(index, 1); + IndexMetadata nextIndexMetadata = createIndexMetadata(index, 2, true); + Diff diff = new IndexMetadata.IndexMetadataDiff(previousIndexMetadata, nextIndexMetadata); + IndexMetadata indexMetadataAfterDiffApplied = diff.apply(previousIndexMetadata); + assertTrue(indexMetadataAfterDiffApplied.isSystem()); + assertThat(indexMetadataAfterDiffApplied.getVersion(), equalTo(nextIndexMetadata.getVersion())); + } + } diff --git a/server/src/test/java/org/opensearch/common/time/DateUtilsTests.java b/server/src/test/java/org/opensearch/common/time/DateUtilsTests.java index 98a79f3ca38dc..cb691f2177f6d 100644 --- a/server/src/test/java/org/opensearch/common/time/DateUtilsTests.java +++ b/server/src/test/java/org/opensearch/common/time/DateUtilsTests.java @@ -260,4 +260,21 @@ public void testRoundYear() { long startOf1996 = Year.of(1996).atDay(1).atStartOfDay().toInstant(ZoneOffset.UTC).toEpochMilli(); assertThat(DateUtils.roundYear(endOf1996), is(startOf1996)); } + + public void testClampToMillisRange() { + Instant normalInstant = Instant.now(); + assertEquals(normalInstant, DateUtils.clampToMillisRange(normalInstant)); + + Instant beforeMinInstant = DateUtils.INSTANT_LONG_MIN_VALUE.minusMillis(1); + assertEquals(DateUtils.INSTANT_LONG_MIN_VALUE, DateUtils.clampToMillisRange(beforeMinInstant)); + + Instant afterMaxInstant = DateUtils.INSTANT_LONG_MAX_VALUE.plusMillis(1); + assertEquals(DateUtils.INSTANT_LONG_MAX_VALUE, DateUtils.clampToMillisRange(afterMaxInstant)); + + assertEquals(DateUtils.INSTANT_LONG_MIN_VALUE, DateUtils.clampToMillisRange(DateUtils.INSTANT_LONG_MIN_VALUE)); + + assertEquals(DateUtils.INSTANT_LONG_MAX_VALUE, DateUtils.clampToMillisRange(DateUtils.INSTANT_LONG_MAX_VALUE)); + + assertThrows(NullPointerException.class, () -> DateUtils.clampToMillisRange(null)); + } } diff --git a/server/src/test/java/org/opensearch/index/mapper/DateFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/DateFieldMapperTests.java index 98bcaa3a1a46b..9032e2cdaed16 100644 --- a/server/src/test/java/org/opensearch/index/mapper/DateFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/DateFieldMapperTests.java @@ -156,7 +156,6 @@ public void testIgnoreMalformedLegacy() throws IOException { "failed to parse date field [2016-03-99] with format [strict_date_optional_time||epoch_millis]" ); 
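// Note: the "long overflow" expectation that used to follow is removed because Resolution.MILLISECONDS.convert(...) // now goes through DateUtils.clampToMillisRange(...), so extreme dates saturate at the epoch-millis bounds instead of // throwing. A rough sketch of the clamped behavior (see testClampToMillisRange above; values illustrative): // Instant extreme = DateUtils.INSTANT_LONG_MAX_VALUE.plusMillis(1); // assert DateUtils.clampToMillisRange(extreme).equals(DateUtils.INSTANT_LONG_MAX_VALUE);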
testIgnoreMalformedForValue("-2147483648", "Invalid value for Year (valid values -999999999 - 999999999): -2147483648"); - testIgnoreMalformedForValue("-522000000", "long overflow"); } public void testIgnoreMalformed() throws IOException { @@ -170,7 +169,6 @@ public void testIgnoreMalformed() throws IOException { "failed to parse date field [2016-03-99] with format [strict_date_time_no_millis||strict_date_optional_time||epoch_millis]" ); testIgnoreMalformedForValue("-2147483648", "Invalid value for Year (valid values -999999999 - 999999999): -2147483648"); - testIgnoreMalformedForValue("-522000000", "long overflow"); } private void testIgnoreMalformedForValue(String value, String expectedCause) throws IOException { diff --git a/server/src/test/java/org/opensearch/index/mapper/DateFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/DateFieldTypeTests.java index 15b16f4610062..52091d571ee72 100644 --- a/server/src/test/java/org/opensearch/index/mapper/DateFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/DateFieldTypeTests.java @@ -31,20 +31,32 @@ package org.opensearch.index.mapper; +import org.apache.lucene.document.Field; import org.apache.lucene.document.LongPoint; import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.document.StoredField; +import org.apache.lucene.document.StringField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.MultiReader; import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.IndexOrDocValuesQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.IndexSortSortedNumericDocValuesRangeQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreDoc; +import org.apache.lucene.search.Sort; +import org.apache.lucene.search.SortField; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.TopDocs; import org.apache.lucene.store.Directory; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; @@ -71,8 +83,12 @@ import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.Instant; import java.time.ZoneOffset; +import java.util.Arrays; import java.util.Collections; +import java.util.List; +import java.util.Locale; import static org.hamcrest.CoreMatchers.is; import static org.apache.lucene.document.LongPoint.pack; @@ -490,4 +506,187 @@ public void testParseSourceValueNanos() throws IOException { MappedFieldType nullValueMapper = fieldType(Resolution.NANOSECONDS, "strict_date_time||epoch_millis", nullValueDate); assertEquals(Collections.singletonList(nullValueDate), fetchSourceValue(nullValueMapper, null)); } + + public void testDateResolutionForOverflow() throws IOException { + Directory dir = newDirectory(); + IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(null)); + + DateFieldType ft = new DateFieldType( + "test_date", + true, + true, + true, + DateFormatter.forPattern("yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_millis||strict_date_optional_time"), + Resolution.MILLISECONDS, + null, 
+ Collections.emptyMap() + ); + + List dates = Arrays.asList( + null, + "2020-01-01T00:00:00Z", + null, + "2021-01-01T00:00:00Z", + "+292278994-08-17T07:12:55.807Z", + null, + "-292275055-05-16T16:47:04.192Z" + ); + + int numNullDates = 0; + long minDateValue = Long.MAX_VALUE; + long maxDateValue = Long.MIN_VALUE; + + for (int i = 0; i < dates.size(); i++) { + ParseContext.Document doc = new ParseContext.Document(); + String dateStr = dates.get(i); + + if (dateStr != null) { + long timestamp = Resolution.MILLISECONDS.convert(DateFormatters.from(ft.dateTimeFormatter().parse(dateStr)).toInstant()); + doc.add(new LongPoint(ft.name(), timestamp)); + doc.add(new SortedNumericDocValuesField(ft.name(), timestamp)); + doc.add(new StoredField(ft.name(), timestamp)); + doc.add(new StoredField("id", i)); + minDateValue = Math.min(minDateValue, timestamp); + maxDateValue = Math.max(maxDateValue, timestamp); + } else { + numNullDates++; + doc.add(new StoredField("id", i)); + } + w.addDocument(doc); + } + + DirectoryReader reader = DirectoryReader.open(w); + IndexSearcher searcher = new IndexSearcher(reader); + + Settings indexSettings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .build(); + QueryShardContext context = new QueryShardContext( + 0, + new IndexSettings(IndexMetadata.builder("foo").settings(indexSettings).build(), indexSettings), + BigArrays.NON_RECYCLING_INSTANCE, + null, + null, + null, + null, + null, + xContentRegistry(), + writableRegistry(), + null, + null, + () -> nowInMillis, + null, + null, + () -> true, + null + ); + + Query rangeQuery = ft.rangeQuery( + "-292275055-05-16T16:47:04.192Z", + "+292278994-08-17T07:12:55.807Z", + true, + true, + null, + null, + null, + context + ); + + TopDocs topDocs = searcher.search(rangeQuery, dates.size()); + assertEquals("Number of non-null date documents", dates.size() - numNullDates, topDocs.totalHits.value); + + for (ScoreDoc scoreDoc : topDocs.scoreDocs) { + org.apache.lucene.document.Document doc = reader.document(scoreDoc.doc); + IndexableField dateField = doc.getField(ft.name()); + if (dateField != null) { + long dateValue = dateField.numericValue().longValue(); + assertTrue( + "Date value " + dateValue + " should be within valid range", + dateValue >= minDateValue && dateValue <= maxDateValue + ); + } + } + + DateFieldType ftWithNullValue = new DateFieldType( + "test_date", + true, + true, + true, + DateFormatter.forPattern("yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_millis||strict_date_optional_time"), + Resolution.MILLISECONDS, + "2020-01-01T00:00:00Z", + Collections.emptyMap() + ); + + Query nullValueQuery = ftWithNullValue.termQuery("2020-01-01T00:00:00Z", context); + topDocs = searcher.search(nullValueQuery, dates.size()); + assertEquals("Documents matching the 2020-01-01 date", 1, topDocs.totalHits.value); + + IOUtils.close(reader, w, dir); + } + + public void testDateFieldTypeWithNulls() throws IOException { + DateFieldType ft = new DateFieldType( + "domainAttributes.dueDate", + true, + true, + true, + DateFormatter.forPattern("yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_millis||date_optional_time"), + Resolution.MILLISECONDS, + null, + Collections.emptyMap() + ); + + Directory dir = newDirectory(); + IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(null)); + + int nullDocs = 3500; + int datedDocs = 50; + + for (int i = 0; i < nullDocs; i++) { + ParseContext.Document doc = new 
+    public void testDateFieldTypeWithNulls() throws IOException {
+        DateFieldType ft = new DateFieldType(
+            "domainAttributes.dueDate",
+            true,
+            true,
+            true,
+            DateFormatter.forPattern("yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_millis||date_optional_time"),
+            Resolution.MILLISECONDS,
+            null,
+            Collections.emptyMap()
+        );
+
+        Directory dir = newDirectory();
+        IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(null));
+
+        int nullDocs = 3500;
+        int datedDocs = 50;
+
+        for (int i = 0; i < nullDocs; i++) {
+            ParseContext.Document doc = new ParseContext.Document();
+            doc.add(new StringField("domainAttributes.firmId", "12345678910111213", Field.Store.YES));
+            w.addDocument(doc);
+        }
+
+        for (int i = 1; i <= datedDocs; i++) {
+            ParseContext.Document doc = new ParseContext.Document();
+            String dateStr = String.format(Locale.ROOT, "2022-03-%02dT15:40:58.324", (i % 30) + 1);
+            long timestamp = Resolution.MILLISECONDS.convert(DateFormatters.from(ft.dateTimeFormatter().parse(dateStr)).toInstant());
+            doc.add(new StringField("domainAttributes.firmId", "12345678910111213", Field.Store.YES));
+            doc.add(new LongPoint(ft.name(), timestamp));
+            doc.add(new SortedNumericDocValuesField(ft.name(), timestamp));
+            doc.add(new StoredField(ft.name(), timestamp));
+            w.addDocument(doc);
+        }
+
+        DirectoryReader reader = DirectoryReader.open(w);
+        IndexSearcher searcher = new IndexSearcher(reader);
+
+        BooleanQuery.Builder queryBuilder = new BooleanQuery.Builder();
+        queryBuilder.add(new TermQuery(new Term("domainAttributes.firmId", "12345678910111213")), BooleanClause.Occur.MUST);
+
+        Sort sort = new Sort(new SortField(ft.name(), SortField.Type.DOC, false));
+
+        for (int i = 0; i < 100; i++) {
+            TopDocs topDocs = searcher.search(queryBuilder.build(), nullDocs + datedDocs, sort);
+            assertEquals("Total hits should match total documents", nullDocs + datedDocs, topDocs.totalHits.value);
+            for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
+                org.apache.lucene.document.Document doc = reader.document(scoreDoc.doc);
+                IndexableField dateField = doc.getField(ft.name());
+                if (dateField != null) {
+                    long dateValue = dateField.numericValue().longValue();
+                    Instant dateInstant = Instant.ofEpochMilli(dateValue);
+                    assertTrue(
+                        "Date should be in March 2022",
+                        dateInstant.isAfter(Instant.parse("2022-03-01T00:00:00Z"))
+                            && dateInstant.isBefore(Instant.parse("2022-04-01T00:00:00Z"))
+                    );
+                }
+            }
+        }
+        IOUtils.close(reader, w, dir);
+    }
 }
diff --git a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java
index fa5fb736f518f..7b2c653e9bdb2 100644
--- a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java
+++ b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java
@@ -2322,10 +2322,24 @@ public List<String> startNodes(int numOfNodes, Settings settings) {
         return startNodes(Collections.nCopies(numOfNodes, settings).toArray(new Settings[0]));
     }
 
+    /**
+     * Starts multiple nodes with the given settings and returns their names; skips the cluster-formed validation when {@code waitForNodeJoin} is true.
+     */
+    public List<String> startNodes(int numOfNodes, Settings settings, Boolean waitForNodeJoin) {
+        return startNodes(waitForNodeJoin, Collections.nCopies(numOfNodes, settings).toArray(new Settings[0]));
+    }
+
     /**
      * Starts multiple nodes with the given settings and returns their names
      */
     public synchronized List<String> startNodes(Settings... extraSettings) {
+        return startNodes(false, extraSettings);
+    }
+
+    /**
+     * Starts multiple nodes with the given settings and returns their names; skips the cluster-formed validation when {@code waitForNodeJoin} is true.
+     */
+    public synchronized List<String> startNodes(Boolean waitForNodeJoin, Settings... extraSettings) {
         final int newClusterManagerCount = Math.toIntExact(Stream.of(extraSettings).filter(DiscoveryNode::isClusterManagerNode).count());
         final int defaultMinClusterManagerNodes;
         if (autoManageClusterManagerNodes) {
@@ -2377,7 +2391,7 @@ public synchronized List<String> startNodes(Settings... extraSettings) {
             nodes.add(nodeAndClient);
         }
         startAndPublishNodesAndClients(nodes);
-        if (autoManageClusterManagerNodes) {
+        if (autoManageClusterManagerNodes && !waitForNodeJoin) {
             validateClusterFormed();
         }
         return nodes.stream().map(NodeAndClient::getName).collect(Collectors.toList());
@@ -2422,6 +2436,10 @@ public List<String> startDataOnlyNodes(int numNodes, Settings settings) {
         return startNodes(numNodes, Settings.builder().put(onlyRole(settings, DiscoveryNodeRole.DATA_ROLE)).build());
     }
 
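+    /**
+     * Starts multiple data-only nodes; skips the cluster-formed validation when {@code ignoreNodeJoin} is true, mirroring {@link #startNodes(Boolean, Settings...)}.
+     */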
+    public List<String> startDataOnlyNodes(int numNodes, Settings settings, Boolean ignoreNodeJoin) {
+        return startNodes(numNodes, Settings.builder().put(onlyRole(settings, DiscoveryNodeRole.DATA_ROLE)).build(), ignoreNodeJoin);
+    }
+
     public List<String> startSearchOnlyNodes(int numNodes) {
         return startSearchOnlyNodes(numNodes, Settings.EMPTY);
     }
diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java
index 1ee856d3092f0..1c26ea4ca2c91 100644
--- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java
@@ -214,6 +214,8 @@
 import java.util.function.Function;
 import java.util.stream.Collectors;
 
+import reactor.util.annotation.NonNull;
+
 import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS;
 import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS;
 import static org.opensearch.common.unit.TimeValue.timeValueMillis;
@@ -2915,6 +2917,43 @@ protected static Settings buildRemoteStoreNodeAttributes(
         return settings.build();
     }
 
+    protected Settings buildRemotePublicationNodeAttributes(
+        @NonNull String remoteStateRepoName,
+        @NonNull String remoteStateRepoType,
+        @NonNull String routingTableRepoName,
+        @NonNull String routingTableRepoType
+    ) {
+        String remoteStateRepositoryTypeAttributeKey = String.format(
+            Locale.getDefault(),
+            "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT,
+            remoteStateRepoName
+        );
+        String routingTableRepositoryTypeAttributeKey = String.format(
+            Locale.getDefault(),
+            "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT,
+            routingTableRepoName
+        );
+        String remoteStateRepositorySettingsAttributeKeyPrefix = String.format(
+            Locale.getDefault(),
+            "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX,
+            remoteStateRepoName
+        );
+        String routingTableRepositorySettingsAttributeKeyPrefix = String.format(
+            Locale.getDefault(),
+            "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX,
+            routingTableRepoName
+        );
+
+        return Settings.builder()
+            .put("node.attr." + REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, remoteStateRepoName)
+            .put("node.attr." + REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY, routingTableRepoName)
+            .put(remoteStateRepositoryTypeAttributeKey, remoteStateRepoType)
+            .put(routingTableRepositoryTypeAttributeKey, routingTableRepoType)
+            .put(remoteStateRepositorySettingsAttributeKeyPrefix + "location", randomRepoPath().toAbsolutePath())
+            .put(routingTableRepositorySettingsAttributeKeyPrefix + "location", randomRepoPath().toAbsolutePath())
+            .build();
+    }
+
     public static String resolvePath(IndexId indexId, String shardId) {
         PathType pathType = PathType.fromCode(indexId.getShardPathType());
         RemoteStorePathStrategy.SnapshotShardPathInput shardPathInput = new RemoteStorePathStrategy.SnapshotShardPathInput.Builder()