diff --git a/.github/workflows/gradle-check.yml b/.github/workflows/gradle-check.yml index 89d894403ff1a..1b9b30625eb83 100644 --- a/.github/workflows/gradle-check.yml +++ b/.github/workflows/gradle-check.yml @@ -20,7 +20,7 @@ jobs: - uses: actions/checkout@v4 - name: Get changed files id: changed-files-specific - uses: tj-actions/changed-files@v44 + uses: tj-actions/changed-files@v45 with: files_ignore: | release-notes/*.md diff --git a/CHANGELOG.md b/CHANGELOG.md index 8918d4937ba1b..bdb6885207945 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix for hasInitiatedFetching to fix allocation explain and manual reroute APIs ([#14972](https://github.com/opensearch-project/OpenSearch/pull/14972)) - [Workload Management] Add queryGroupId to Task ([#14708](https://github.com/opensearch-project/OpenSearch/pull/14708)) - Add setting to ignore throttling nodes for allocation of unassigned primaries in remote restore ([#14991](https://github.com/opensearch-project/OpenSearch/pull/14991)) +- [Workload Management] Add Delete QueryGroup API Logic ([#14735](https://github.com/opensearch-project/OpenSearch/pull/14735)) - [Streaming Indexing] Enhance RestClient with a new streaming API support ([#14437](https://github.com/opensearch-project/OpenSearch/pull/14437)) - Add basic aggregation support for derived fields ([#14618](https://github.com/opensearch-project/OpenSearch/pull/14618)) - [Workload Management] Add Create QueryGroup API Logic ([#14680](https://github.com/opensearch-project/OpenSearch/pull/14680)) @@ -22,15 +23,19 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [Workload Management] QueryGroup resource tracking framework changes ([#13897](https://github.com/opensearch-project/OpenSearch/pull/13897)) - Support filtering on a large list encoded by bitmap ([#14774](https://github.com/opensearch-project/OpenSearch/pull/14774)) - Add slice execution listeners to SearchOperationListener interface ([#15153](https://github.com/opensearch-project/OpenSearch/pull/15153)) +- Add allowlist setting for ingest-geoip and ingest-useragent ([#15325](https://github.com/opensearch-project/OpenSearch/pull/15325)) - Adding access to noSubMatches and noOverlappingMatches in Hyphenation ([#13895](https://github.com/opensearch-project/OpenSearch/pull/13895)) - Add support for index level max slice count setting for concurrent segment search ([#15336](https://github.com/opensearch-project/OpenSearch/pull/15336)) +- [Streaming Indexing] Introduce bulk HTTP API streaming flavor ([#15381](https://github.com/opensearch-project/OpenSearch/pull/15381)) +- Add support for centralized snapshot creation with pinned timestamp ([#15124](https://github.com/opensearch-project/OpenSearch/pull/15124)) +- Add concurrent search support for Derived Fields ([#15326](https://github.com/opensearch-project/OpenSearch/pull/15326)) - Add runAs to Subject interface and introduce IdentityAwarePlugin extension point ([#14630](https://github.com/opensearch-project/OpenSearch/pull/14630)) ### Dependencies - Bump `netty` from 4.1.111.Final to 4.1.112.Final ([#15081](https://github.com/opensearch-project/OpenSearch/pull/15081)) - Bump `org.apache.commons:commons-lang3` from 3.14.0 to 3.16.0 ([#14861](https://github.com/opensearch-project/OpenSearch/pull/14861),
[#15205](https://github.com/opensearch-project/OpenSearch/pull/15205)) - OpenJDK Update (July 2024 Patch releases) ([#14998](https://github.com/opensearch-project/OpenSearch/pull/14998)) -- Bump `com.microsoft.azure:msal4j` from 1.16.1 to 1.16.2 ([#14995](https://github.com/opensearch-project/OpenSearch/pull/14995)) +- Bump `com.microsoft.azure:msal4j` from 1.16.1 to 1.17.0 ([#14995](https://github.com/opensearch-project/OpenSearch/pull/14995), [#15420](https://github.com/opensearch-project/OpenSearch/pull/15420)) - Bump `actions/github-script` from 6 to 7 ([#14997](https://github.com/opensearch-project/OpenSearch/pull/14997)) - Bump `org.tukaani:xz` from 1.9 to 1.10 ([#15110](https://github.com/opensearch-project/OpenSearch/pull/15110)) - Bump `actions/setup-java` from 1 to 4 ([#15104](https://github.com/opensearch-project/OpenSearch/pull/15104)) @@ -45,6 +50,12 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.azure:azure-core-http-netty` from 1.15.1 to 1.15.3 ([#15300](https://github.com/opensearch-project/OpenSearch/pull/15300)) - Bump `com.gradle.develocity` from 3.17.6 to 3.18 ([#15297](https://github.com/opensearch-project/OpenSearch/pull/15297)) - Bump `commons-cli:commons-cli` from 1.8.0 to 1.9.0 ([#15298](https://github.com/opensearch-project/OpenSearch/pull/15298)) +- Bump `opentelemetry` from 1.40.0 to 1.41.0 ([#15361](https://github.com/opensearch-project/OpenSearch/pull/15361)) +- Bump `opentelemetry-semconv` from 1.26.0-alpha to 1.27.0-alpha ([#15361](https://github.com/opensearch-project/OpenSearch/pull/15361)) +- Bump `tj-actions/changed-files` from 44 to 45 ([#15422](https://github.com/opensearch-project/OpenSearch/pull/15422)) +- Bump `dnsjava:dnsjava` from 3.6.0 to 3.6.1 ([#15418](https://github.com/opensearch-project/OpenSearch/pull/15418)) +- Bump `com.netflix.nebula.ospackage-base` from 11.9.1 to 11.10.0 ([#15419](https://github.com/opensearch-project/OpenSearch/pull/15419)) +- Bump `org.roaringbitmap:RoaringBitmap` from 1.1.0 to 1.2.1 ([#15423](https://github.com/opensearch-project/OpenSearch/pull/15423)) ### Changed - Add lower limit for primary and replica batch allocators timeout ([#14979](https://github.com/opensearch-project/OpenSearch/pull/14979)) @@ -64,6 +75,9 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fixed array field name omission in flat_object function for nested JSON ([#13620](https://github.com/opensearch-project/OpenSearch/pull/13620)) - Fix range aggregation optimization ignoring top level queries ([#15194](https://github.com/opensearch-project/OpenSearch/pull/15194)) - Fix incorrect parameter names in MinHash token filter configuration handling ([#15233](https://github.com/opensearch-project/OpenSearch/pull/15233)) +- Fix indexing error when flat_object field is explicitly null ([#15375](https://github.com/opensearch-project/OpenSearch/pull/15375)) +- Fix split response processor not included in allowlist ([#15393](https://github.com/opensearch-project/OpenSearch/pull/15393)) +- Fix unchecked cast in dynamic action map getter ([#15394](https://github.com/opensearch-project/OpenSearch/pull/15394)) ### Security diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 9d7bbf6f8f769..98f474a7f0b90 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -37,7 +37,7 @@ reactor_netty = 1.1.22 reactor = 3.5.20 # client dependencies -httpclient5 = 5.2.3 +httpclient5 = 5.3.1 httpcore5 = 5.2.5 httpclient = 4.5.14 httpcore = 4.4.16 
@@ -74,5 +74,5 @@ jzlib = 1.1.3 resteasy = 6.2.4.Final # opentelemetry dependencies -opentelemetry = 1.40.0 -opentelemetrysemconv = 1.26.0-alpha +opentelemetry = 1.41.0 +opentelemetrysemconv = 1.27.0-alpha diff --git a/client/rest/licenses/httpclient5-5.2.3.jar.sha1 b/client/rest/licenses/httpclient5-5.2.3.jar.sha1 deleted file mode 100644 index 43e233e72001a..0000000000000 --- a/client/rest/licenses/httpclient5-5.2.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5d753a99d299756998a08c488f2efdf9cf26198e \ No newline at end of file diff --git a/client/rest/licenses/httpclient5-5.3.1.jar.sha1 b/client/rest/licenses/httpclient5-5.3.1.jar.sha1 new file mode 100644 index 0000000000000..c8f32c1ec23a1 --- /dev/null +++ b/client/rest/licenses/httpclient5-5.3.1.jar.sha1 @@ -0,0 +1 @@ +56b53c8f4bcdaada801d311cf2ff8a24d6d96883 \ No newline at end of file diff --git a/client/rest/src/main/java/org/opensearch/client/RestClient.java b/client/rest/src/main/java/org/opensearch/client/RestClient.java index 5c87e3fda5701..ab112ca5219e7 100644 --- a/client/rest/src/main/java/org/opensearch/client/RestClient.java +++ b/client/rest/src/main/java/org/opensearch/client/RestClient.java @@ -114,6 +114,7 @@ import java.util.zip.GZIPOutputStream; import org.reactivestreams.Publisher; +import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; import reactor.core.publisher.MonoSink; @@ -416,7 +417,12 @@ private Publisher<Message<HttpResponse, Publisher<ByteBuffer>>> streamRequest( try { final ResponseOrResponseException responseOrResponseException = convertResponse(request, node, message); if (responseOrResponseException.responseException == null) { - return Mono.just(message); + return Mono.just( + new Message<>( + message.getHead(), + Flux.from(message.getBody()).flatMapSequential(b -> Flux.fromIterable(frame(b))) + ) + ); } else { if (nodeTuple.nodes.hasNext()) { return Mono.from(streamRequest(nodeTuple, request)); @@ -431,6 +437,48 @@ }); } + /** + * Frame the {@link ByteBuffer} into individual chunks that are separated by the '\r\n' sequence.
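+ * For example, a buffer holding {@code "foo\r\nbar\r\n"} is framed into the two chunks {@code "foo"} and {@code "bar"}, with the separators dropped (values illustrative, not from the original patch).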
+ * @param b {@link ByteBuffer} to split + * @return individual chunks + */ + private static Collection<ByteBuffer> frame(ByteBuffer b) { + final Collection<ByteBuffer> buffers = new ArrayList<>(); + + int position = b.position(); + while (b.hasRemaining()) { + // Skip the chunk separator when it comes right at the beginning + if (b.get() == '\r' && b.hasRemaining() && b.position() > 1) { + if (b.get() == '\n') { + final byte[] chunk = new byte[b.position() - position]; + + b.position(position); + b.get(chunk); + + // Do not copy the '\r\n' sequence + buffers.add(ByteBuffer.wrap(chunk, 0, chunk.length - 2)); + position = b.position(); + } + } + } + + if (buffers.isEmpty()) { + return Collections.singleton(b); + } + + // Copy last chunk + if (position != b.position()) { + final byte[] chunk = new byte[b.position() - position]; + + b.position(position); + b.get(chunk); + + buffers.add(ByteBuffer.wrap(chunk, 0, chunk.length)); + } + + return buffers; + } + private ResponseOrResponseException convertResponse(InternalRequest request, Node node, ClassicHttpResponse httpResponse) throws IOException { RequestLogger.logResponse(logger, request.httpRequest, node.getHost(), httpResponse); diff --git a/client/sniffer/licenses/httpclient5-5.2.3.jar.sha1 b/client/sniffer/licenses/httpclient5-5.2.3.jar.sha1 deleted file mode 100644 index 43e233e72001a..0000000000000 --- a/client/sniffer/licenses/httpclient5-5.2.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5d753a99d299756998a08c488f2efdf9cf26198e \ No newline at end of file diff --git a/client/sniffer/licenses/httpclient5-5.3.1.jar.sha1 b/client/sniffer/licenses/httpclient5-5.3.1.jar.sha1 new file mode 100644 index 0000000000000..c8f32c1ec23a1 --- /dev/null +++ b/client/sniffer/licenses/httpclient5-5.3.1.jar.sha1 @@ -0,0 +1 @@ +56b53c8f4bcdaada801d311cf2ff8a24d6d96883 \ No newline at end of file diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle index 621620eef9d71..25af649bb4aed 100644 --- a/distribution/packages/build.gradle +++ b/distribution/packages/build.gradle @@ -63,7 +63,7 @@ import java.util.regex.Pattern */ plugins { - id "com.netflix.nebula.ospackage-base" version "11.9.1" + id "com.netflix.nebula.ospackage-base" version "11.10.0" } void addProcessFilesTask(String type, boolean jdk) { diff --git a/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/IngestGeoIpModulePlugin.java b/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/IngestGeoIpModulePlugin.java index 524d1719c37c0..742b923f05199 100644 --- a/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/IngestGeoIpModulePlugin.java +++ b/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/IngestGeoIpModulePlugin.java @@ -44,6 +44,7 @@ import org.opensearch.common.cache.CacheBuilder; import org.opensearch.common.io.PathUtils; import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; import org.opensearch.common.util.io.IOUtils; import org.opensearch.ingest.Processor; import org.opensearch.plugins.IngestPlugin; @@ -62,10 +63,18 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Set; import java.util.function.Function; +import java.util.stream.Collectors; import java.util.stream.Stream; public class IngestGeoIpModulePlugin extends Plugin implements IngestPlugin, Closeable { + static final Setting<List<String>> PROCESSORS_ALLOWLIST_SETTING = Setting.listSetting( + "ingest.geoip.processors.allowed", + List.of(), + Function.identity(), + Setting.Property.NodeScope + ); public static
final Setting<Long> CACHE_SIZE = Setting.longSetting("ingest.geoip.cache_size", 1000, 0, Setting.Property.NodeScope); static String[] DEFAULT_DATABASE_FILENAMES = new String[] { "GeoLite2-ASN.mmdb", "GeoLite2-City.mmdb", "GeoLite2-Country.mmdb" }; @@ -74,7 +83,7 @@ public class IngestGeoIpModulePlugin extends Plugin implements IngestPlugin, Clo @Override public List<Setting<?>> getSettings() { - return Arrays.asList(CACHE_SIZE); + return Arrays.asList(CACHE_SIZE, PROCESSORS_ALLOWLIST_SETTING); } @Override @@ -90,7 +99,10 @@ public Map<String, Processor.Factory> getProcessors(Processor.Parameters paramet } catch (IOException e) { throw new RuntimeException(e); } - return Collections.singletonMap(GeoIpProcessor.TYPE, new GeoIpProcessor.Factory(databaseReaders, new GeoIpCache(cacheSize))); + return filterForAllowlistSetting( + parameters.env.settings(), + Map.of(GeoIpProcessor.TYPE, new GeoIpProcessor.Factory(databaseReaders, new GeoIpCache(cacheSize))) + ); } /* @@ -175,6 +187,30 @@ public void close() throws IOException { } } + private Map<String, Processor.Factory> filterForAllowlistSetting(Settings settings, Map<String, Processor.Factory> map) { + if (PROCESSORS_ALLOWLIST_SETTING.exists(settings) == false) { + return Map.copyOf(map); + } + final Set<String> allowlist = Set.copyOf(PROCESSORS_ALLOWLIST_SETTING.get(settings)); + // Assert that no unknown processors are defined in the allowlist + final Set<String> unknownAllowlistProcessors = allowlist.stream() + .filter(p -> map.containsKey(p) == false) + .collect(Collectors.toUnmodifiableSet()); + if (unknownAllowlistProcessors.isEmpty() == false) { + throw new IllegalArgumentException( + "Processor(s) " + + unknownAllowlistProcessors + + " were defined in [" + + PROCESSORS_ALLOWLIST_SETTING.getKey() + + "] but do not exist" + ); + } + return map.entrySet() + .stream() + .filter(e -> allowlist.contains(e.getKey())) + .collect(Collectors.toUnmodifiableMap(Map.Entry::getKey, Map.Entry::getValue)); + } + /** * The in-memory cache for the geoip data. There should only be 1 instance of this class.
* This cache differs from the maxmind's {@link NodeCache} such that this cache stores the deserialized Json objects to avoid the diff --git a/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/IngestGeoIpModulePluginTests.java b/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/IngestGeoIpModulePluginTests.java index f6d42d1d65670..9446ec1228532 100644 --- a/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/IngestGeoIpModulePluginTests.java +++ b/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/IngestGeoIpModulePluginTests.java @@ -35,8 +35,20 @@ import com.maxmind.geoip2.model.AbstractResponse; import org.opensearch.common.network.InetAddresses; +import org.opensearch.common.settings.Settings; +import org.opensearch.env.TestEnvironment; +import org.opensearch.ingest.Processor; import org.opensearch.ingest.geoip.IngestGeoIpModulePlugin.GeoIpCache; import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.StreamsUtils; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.UncheckedIOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.List; +import java.util.Set; import static org.mockito.Mockito.mock; @@ -77,4 +89,87 @@ public void testInvalidInit() { IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> new GeoIpCache(-1)); assertEquals("geoip max cache size must be 0 or greater", ex.getMessage()); } + + public void testAllowList() throws IOException { + runAllowListTest(List.of()); + runAllowListTest(List.of("geoip")); + } + + public void testInvalidAllowList() throws IOException { + List<String> invalidAllowList = List.of("set"); + Settings.Builder settingsBuilder = Settings.builder() + .putList(IngestGeoIpModulePlugin.PROCESSORS_ALLOWLIST_SETTING.getKey(), invalidAllowList); + createDb(settingsBuilder); + try (IngestGeoIpModulePlugin plugin = new IngestGeoIpModulePlugin()) { + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> plugin.getProcessors(createParameters(settingsBuilder.build())) + ); + assertEquals( + "Processor(s) " + + invalidAllowList + + " were defined in [" + + IngestGeoIpModulePlugin.PROCESSORS_ALLOWLIST_SETTING.getKey() + + "] but do not exist", + e.getMessage() + ); + } + } + + public void testAllowListNotSpecified() throws IOException { + Settings.Builder settingsBuilder = Settings.builder(); + settingsBuilder.remove(IngestGeoIpModulePlugin.PROCESSORS_ALLOWLIST_SETTING.getKey()); + createDb(settingsBuilder); + try (IngestGeoIpModulePlugin plugin = new IngestGeoIpModulePlugin()) { + final Set<String> expected = Set.of("geoip"); + assertEquals(expected, plugin.getProcessors(createParameters(settingsBuilder.build())).keySet()); + } + } + + private void runAllowListTest(List<String> allowList) throws IOException { + Settings.Builder settingsBuilder = Settings.builder(); + createDb(settingsBuilder); + final Settings settings = settingsBuilder.putList(IngestGeoIpModulePlugin.PROCESSORS_ALLOWLIST_SETTING.getKey(), allowList).build(); + try (IngestGeoIpModulePlugin plugin = new IngestGeoIpModulePlugin()) { + assertEquals(Set.copyOf(allowList), plugin.getProcessors(createParameters(settings)).keySet()); + } + } + + private void createDb(Settings.Builder settingsBuilder) throws IOException { + Path configDir = createTempDir(); + Path userAgentConfigDir = configDir.resolve("ingest-geoip"); + Files.createDirectories(userAgentConfigDir); + settingsBuilder.put("ingest.geoip.database_path",
configDir).put("path.home", configDir); + try { + Files.copy( + new ByteArrayInputStream(StreamsUtils.copyToBytesFromClasspath("/GeoLite2-City.mmdb")), + configDir.resolve("GeoLite2-City.mmdb") + ); + Files.copy( + new ByteArrayInputStream(StreamsUtils.copyToBytesFromClasspath("/GeoLite2-Country.mmdb")), + configDir.resolve("GeoLite2-Country.mmdb") + ); + Files.copy( + new ByteArrayInputStream(StreamsUtils.copyToBytesFromClasspath("/GeoLite2-ASN.mmdb")), + configDir.resolve("GeoLite2-ASN.mmdb") + ); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + private static Processor.Parameters createParameters(Settings settings) { + return new Processor.Parameters( + TestEnvironment.newEnvironment(settings), + null, + null, + null, + () -> 0L, + (a, b) -> null, + null, + null, + $ -> {}, + null + ); + } } diff --git a/modules/ingest-user-agent/src/main/java/org/opensearch/ingest/useragent/IngestUserAgentModulePlugin.java b/modules/ingest-user-agent/src/main/java/org/opensearch/ingest/useragent/IngestUserAgentModulePlugin.java index cd96fcf2347ef..bac90d20b44e1 100644 --- a/modules/ingest-user-agent/src/main/java/org/opensearch/ingest/useragent/IngestUserAgentModulePlugin.java +++ b/modules/ingest-user-agent/src/main/java/org/opensearch/ingest/useragent/IngestUserAgentModulePlugin.java @@ -33,6 +33,7 @@ package org.opensearch.ingest.useragent; import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; import org.opensearch.ingest.Processor; import org.opensearch.plugins.IngestPlugin; import org.opensearch.plugins.Plugin; @@ -47,10 +48,19 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; +import java.util.function.Function; +import java.util.stream.Collectors; import java.util.stream.Stream; public class IngestUserAgentModulePlugin extends Plugin implements IngestPlugin { + static final Setting<List<String>> PROCESSORS_ALLOWLIST_SETTING = Setting.listSetting( + "ingest.useragent.processors.allowed", + List.of(), + Function.identity(), + Setting.Property.NodeScope + ); private final Setting<Long> CACHE_SIZE_SETTING = Setting.longSetting( "ingest.user_agent.cache_size", 1000, @@ -77,7 +87,34 @@ public Map<String, Processor.Factory> getProcessors(Processor.Parameters paramet } catch (IOException e) { throw new RuntimeException(e); } - return Collections.singletonMap(UserAgentProcessor.TYPE, new UserAgentProcessor.Factory(userAgentParsers)); + return filterForAllowlistSetting( + parameters.env.settings(), + Collections.singletonMap(UserAgentProcessor.TYPE, new UserAgentProcessor.Factory(userAgentParsers)) + ); + } + + private Map<String, Processor.Factory> filterForAllowlistSetting(Settings settings, Map<String, Processor.Factory> map) { + if (PROCESSORS_ALLOWLIST_SETTING.exists(settings) == false) { + return Map.copyOf(map); + } + final Set<String> allowlist = Set.copyOf(PROCESSORS_ALLOWLIST_SETTING.get(settings)); + // Assert that no unknown processors are defined in the allowlist + final Set<String> unknownAllowlistProcessors = allowlist.stream() + .filter(p -> map.containsKey(p) == false) + .collect(Collectors.toUnmodifiableSet()); + if (unknownAllowlistProcessors.isEmpty() == false) { + throw new IllegalArgumentException( + "Processor(s) " + + unknownAllowlistProcessors + + " were defined in [" + + PROCESSORS_ALLOWLIST_SETTING.getKey() + + "] but do not exist" + ); + } + return map.entrySet() + .stream() + .filter(e -> allowlist.contains(e.getKey())) + .collect(Collectors.toUnmodifiableMap(Map.Entry::getKey, Map.Entry::getValue)); } static Map<String, UserAgentParser> createUserAgentParsers(Path userAgentConfigDirectory,
UserAgentCache cache) throws IOException { diff --git a/modules/ingest-user-agent/src/test/java/org/opensearch/ingest/useragent/IngestUserAgentModulePluginTests.java b/modules/ingest-user-agent/src/test/java/org/opensearch/ingest/useragent/IngestUserAgentModulePluginTests.java new file mode 100644 index 0000000000000..31fdafff1188a --- /dev/null +++ b/modules/ingest-user-agent/src/test/java/org/opensearch/ingest/useragent/IngestUserAgentModulePluginTests.java @@ -0,0 +1,114 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ingest.useragent; + +import org.opensearch.common.settings.Settings; +import org.opensearch.env.TestEnvironment; +import org.opensearch.ingest.Processor; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; + +import java.io.BufferedReader; +import java.io.BufferedWriter; +import java.io.IOException; +import java.io.InputStreamReader; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.List; +import java.util.Set; + +public class IngestUserAgentModulePluginTests extends OpenSearchTestCase { + private Settings.Builder settingsBuilder; + + @Before + public void setup() throws IOException { + Path configDir = createTempDir(); + Path userAgentConfigDir = configDir.resolve("ingest-user-agent"); + Files.createDirectories(userAgentConfigDir); + settingsBuilder = Settings.builder().put("ingest-user-agent", configDir).put("path.home", configDir); + + // Copy file, leaving out the device parsers at the end + String regexWithoutDevicesFilename = "regexes_without_devices.yml"; + try ( + BufferedReader reader = new BufferedReader( + new InputStreamReader(UserAgentProcessor.class.getResourceAsStream("/regexes.yml"), StandardCharsets.UTF_8) + ); + BufferedWriter writer = Files.newBufferedWriter(userAgentConfigDir.resolve(regexWithoutDevicesFilename)); + ) { + String line; + while ((line = reader.readLine()) != null) { + if (line.startsWith("device_parsers:")) { + break; + } + + writer.write(line); + writer.newLine(); + } + } + } + + public void testAllowList() throws IOException { + runAllowListTest(List.of()); + runAllowListTest(List.of("user_agent")); + } + + public void testInvalidAllowList() throws IOException { + List<String> invalidAllowList = List.of("set"); + final Settings settings = settingsBuilder.putList( + IngestUserAgentModulePlugin.PROCESSORS_ALLOWLIST_SETTING.getKey(), + invalidAllowList + ).build(); + try (IngestUserAgentModulePlugin plugin = new IngestUserAgentModulePlugin()) { + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> plugin.getProcessors(createParameters(settings)) + ); + assertEquals( + "Processor(s) " + + invalidAllowList + + " were defined in [" + + IngestUserAgentModulePlugin.PROCESSORS_ALLOWLIST_SETTING.getKey() + + "] but do not exist", + e.getMessage() + ); + } + } + + public void testAllowListNotSpecified() throws IOException { + settingsBuilder.remove(IngestUserAgentModulePlugin.PROCESSORS_ALLOWLIST_SETTING.getKey()); + try (IngestUserAgentModulePlugin plugin = new IngestUserAgentModulePlugin()) { + final Set<String> expected = Set.of("user_agent"); + assertEquals(expected, plugin.getProcessors(createParameters(settingsBuilder.build())).keySet()); + } + } + + private void runAllowListTest(List<String> allowList) throws IOException { + final Settings settings =
settingsBuilder.putList(IngestUserAgentModulePlugin.PROCESSORS_ALLOWLIST_SETTING.getKey(), allowList) + .build(); + try (IngestUserAgentModulePlugin plugin = new IngestUserAgentModulePlugin()) { + assertEquals(Set.copyOf(allowList), plugin.getProcessors(createParameters(settings)).keySet()); + } + } + + private static Processor.Parameters createParameters(Settings settings) { + return new Processor.Parameters( + TestEnvironment.newEnvironment(settings), + null, + null, + null, + () -> 0L, + (a, b) -> null, + null, + null, + $ -> {}, + null + ); + } +} diff --git a/modules/lang-painless/src/internalClusterTest/java/org/opensearch/painless/SimplePainlessIT.java b/modules/lang-painless/src/internalClusterTest/java/org/opensearch/painless/SimplePainlessIT.java index df327bf4871c6..c9078fdeeea28 100644 --- a/modules/lang-painless/src/internalClusterTest/java/org/opensearch/painless/SimplePainlessIT.java +++ b/modules/lang-painless/src/internalClusterTest/java/org/opensearch/painless/SimplePainlessIT.java @@ -188,10 +188,6 @@ public void testTermsValuesSource() throws Exception { } public void testSimpleDerivedFieldsQuery() { - assumeFalse( - "Derived fields do not support concurrent search https://github.com/opensearch-project/OpenSearch/issues/15007", - internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) - ); SearchRequest searchRequest = new SearchRequest("test-df").source( SearchSourceBuilder.searchSource() .derivedField("result", "keyword", new Script("emit(params._source[\"field\"])")) @@ -204,10 +200,6 @@ public void testSimpleDerivedFieldsQuery() { } public void testSimpleDerivedFieldsAgg() { - assumeFalse( - "Derived fields do not support concurrent search https://github.com/opensearch-project/OpenSearch/issues/15007", - internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) - ); SearchRequest searchRequest = new SearchRequest("test-df").source( SearchSourceBuilder.searchSource() .derivedField("result", "keyword", new Script("emit(params._source[\"field\"])")) diff --git a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePlugin.java b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePlugin.java index 2a2de9debb9d9..488b9e632aa2a 100644 --- a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePlugin.java +++ b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePlugin.java @@ -97,6 +97,8 @@ public Map<String, Processor.Factory<SearchResponseProcessor>> getResponseProces new TruncateHitsResponseProcessor.Factory(), CollapseResponseProcessor.TYPE, new CollapseResponseProcessor.Factory(), + SplitResponseProcessor.TYPE, + new SplitResponseProcessor.Factory(), SortResponseProcessor.TYPE, new SortResponseProcessor.Factory() ) diff --git a/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePluginTests.java b/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePluginTests.java index 404842742629c..e10f06da29ba0 100644 --- a/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePluginTests.java +++ b/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePluginTests.java @@ -47,6 +47,7 @@ public
void testResponseProcessorAllowlist() throws IOException { List.of("rename_field", "truncate_hits", "collapse"), SearchPipelineCommonModulePlugin::getResponseProcessors ); + runAllowlistTest(key, List.of("split", "sort"), SearchPipelineCommonModulePlugin::getResponseProcessors); final IllegalArgumentException e = expectThrows( IllegalArgumentException.class, @@ -82,7 +83,7 @@ public void testAllowlistNotSpecified() throws IOException { try (SearchPipelineCommonModulePlugin plugin = new SearchPipelineCommonModulePlugin()) { assertEquals(Set.of("oversample", "filter_query", "script"), plugin.getRequestProcessors(createParameters(settings)).keySet()); assertEquals( - Set.of("rename_field", "truncate_hits", "collapse", "sort"), + Set.of("rename_field", "truncate_hits", "collapse", "split", "sort"), plugin.getResponseProcessors(createParameters(settings)).keySet() ); assertEquals(Set.of(), plugin.getSearchPhaseResultsProcessors(createParameters(settings)).keySet()); diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 6844311927db0..e76556f24cc23 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -61,7 +61,7 @@ dependencies { // Start of transitive dependencies for azure-identity api 'com.microsoft.azure:msal4j-persistence-extension:1.3.0' api "net.java.dev.jna:jna-platform:${versions.jna}" - api 'com.microsoft.azure:msal4j:1.16.2' + api 'com.microsoft.azure:msal4j:1.17.0' api 'com.nimbusds:oauth2-oidc-sdk:11.9.1' api 'com.nimbusds:nimbus-jose-jwt:9.40' api 'com.nimbusds:content-type:2.3' diff --git a/plugins/repository-azure/licenses/msal4j-1.16.2.jar.sha1 b/plugins/repository-azure/licenses/msal4j-1.16.2.jar.sha1 deleted file mode 100644 index 1363e5a0793d2..0000000000000 --- a/plugins/repository-azure/licenses/msal4j-1.16.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b43ec4dd657f8ed5922bc0a8ccbe49000968bd15 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/msal4j-1.17.0.jar.sha1 b/plugins/repository-azure/licenses/msal4j-1.17.0.jar.sha1 new file mode 100644 index 0000000000000..34101c989eecd --- /dev/null +++ b/plugins/repository-azure/licenses/msal4j-1.17.0.jar.sha1 @@ -0,0 +1 @@ +7d37157da92b719f250b0023234ac9dda922a2a5 \ No newline at end of file diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java index 01b75c0b915f2..b5c526451899e 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java @@ -42,6 +42,7 @@ import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.Priority; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.BlobStore; import org.opensearch.common.blobstore.BlobStoreException; @@ -391,6 +392,7 @@ public void finalizeSnapshot( SnapshotInfo snapshotInfo, Version repositoryMetaVersion, Function<ClusterState, ClusterState> stateTransformer, + Priority repositoryUpdatePriority, ActionListener<RepositoryData> listener ) { super.finalizeSnapshot( @@ -400,6 +402,7 @@ snapshotInfo, repositoryMetaVersion, stateTransformer, + repositoryUpdatePriority, listener ) ; } } diff --git a/plugins/telemetry-otel/build.gradle index
66d172e3dc7f3..872d928aa093f 100644 --- a/plugins/telemetry-otel/build.gradle +++ b/plugins/telemetry-otel/build.gradle @@ -86,7 +86,9 @@ thirdPartyAudit { 'io.opentelemetry.sdk.autoconfigure.spi.traces.ConfigurableSpanExporterProvider', 'kotlin.io.path.PathsKt', 'io.opentelemetry.sdk.autoconfigure.spi.traces.ConfigurableSpanExporterProvider', - 'io.opentelemetry.sdk.autoconfigure.spi.internal.AutoConfigureListener' + 'io.opentelemetry.sdk.autoconfigure.spi.internal.AutoConfigureListener', + 'io.opentelemetry.sdk.autoconfigure.spi.internal.ComponentProvider', + 'io.opentelemetry.sdk.autoconfigure.spi.internal.StructuredConfigProperties' ) } diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-1.40.0.jar.sha1 deleted file mode 100644 index 04ec81edf969c..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-api-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6db562f2b74ffaa7253d740e9aa7a3c4f2e392ec \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..ead8fb235fa12 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-api-1.41.0.jar.sha1 @@ -0,0 +1 @@ +ec5ad3b420c9fba4b340e85a3199fd0f2accd023 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.40.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.40.0-alpha.jar.sha1 deleted file mode 100644 index bcd7c886b5f6c..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.40.0-alpha.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -43115633361430a3c6aaa39fd78363014ac79270 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.41.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.41.0-alpha.jar.sha1 new file mode 100644 index 0000000000000..b601a4fb5246f --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.41.0-alpha.jar.sha1 @@ -0,0 +1 @@ +fd387313cc37a6e93062e9a80a2526634d22cb19 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-context-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-context-1.40.0.jar.sha1 deleted file mode 100644 index 9716ec518c886..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-context-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bf1db0f288b9baaabdb439ab6179b673b751511e \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-context-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-context-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..74b7cb25cdfe5 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-context-1.41.0.jar.sha1 @@ -0,0 +1 @@ +3d7cf15ef425053e24e825160ca7b4ac08d721aa \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.40.0.jar.sha1 deleted file mode 100644 index c0e79b05aa675..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b883b179c242a1761df2d408fe01ec41b17327a3 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.41.0.jar.sha1 
b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..d8d8f75850cb6 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.41.0.jar.sha1 @@ -0,0 +1 @@ +cf92f4c1b60c2359c12f6f323f6a2a623c333910 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.40.0.jar.sha1 deleted file mode 100644 index 1df0ad183c475..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a8c1f9b05ac9fb1259517cf53950ccecaf84ebe1 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..3e1212943f894 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.41.0.jar.sha1 @@ -0,0 +1 @@ +8dee21440b811004ecc1c36c1cd44f9d3494546c \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.40.0.jar.sha1 deleted file mode 100644 index ebeb639a8459c..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8d8b92bcdb0ace48fb5764cc1ad7a0de197d5b8c \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..21a29cc8445e5 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.41.0.jar.sha1 @@ -0,0 +1 @@ +d86e60b6d49e389ebe5797d42a7288a20d30c162 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.40.0.jar.sha1 deleted file mode 100644 index b630c808d4763..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -80fa10130cc7e7626e2581aa7c5871eab7381889 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..ae522ac698aa8 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.41.0.jar.sha1 @@ -0,0 +1 @@ +aeba3075b8dfd97779edadc0a3711d999bb0e396 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.40.0.jar.sha1 deleted file mode 100644 index eda90dc825e6f..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -006dcdbf8eb911ad4d11c54fa824f5a97f582850 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..a741d0a167d60 --- /dev/null +++ 
b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.41.0.jar.sha1 @@ -0,0 +1 @@ +368d7905d6a0a313c63e3a91f895a3a08500519e \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.40.0.jar.sha1 deleted file mode 100644 index cdd7dc6551b33..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -59f260c5412b79a5a40c7d433600248727cd195a \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..972e7de1c74be --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.41.0.jar.sha1 @@ -0,0 +1 @@ +c740e8f7d0d914d6acd310ac53901bb8753c6e8d \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.40.0.jar.sha1 deleted file mode 100644 index 668291498bbae..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7042214012232a5d6a251aca4aa5932014a4946b \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..c56ca0b9e8169 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.41.0.jar.sha1 @@ -0,0 +1 @@ +b820861f85ba83db0ad896c47f723208d7473d5a \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.40.0.jar.sha1 deleted file mode 100644 index 74f0786e21954..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1c6b884d65f79d40429263ac0ab7ed1422237837 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..39db6cb73727f --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.41.0.jar.sha1 @@ -0,0 +1 @@ +f88ee292f5605c87dfe85c8d90131bce9f0b3b8e \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.40.0.jar.sha1 deleted file mode 100644 index 23ef1bf6e6b2c..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a1c9b33a8660ace82aecb7f1c7ea50093dc87f0a \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..6dcd496e033d3 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.41.0.jar.sha1 @@ -0,0 +1 @@ +9d1200befb28e3e9f61073ac3de23cc55e509dc7 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.40.0.jar.sha1 deleted file mode 100644 index aea753f0df18b..0000000000000 --- 
a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5145f077bf2821ad243617baf8c1810d29af8566 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..161e400f87077 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.41.0.jar.sha1 @@ -0,0 +1 @@ +d9bbc2e2e800317d72fbf3141ae8391e95fa6229 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.26.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.26.0-alpha.jar.sha1 deleted file mode 100644 index 7124dcb31da3f..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.26.0-alpha.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -955de1d2de4d3d2bb6ba2498f19c9a06da2f3956 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.27.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.27.0-alpha.jar.sha1 new file mode 100644 index 0000000000000..e986b4b53388e --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.27.0-alpha.jar.sha1 @@ -0,0 +1 @@ +906d916bee46f60260c09314284b5948c54a0662 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4StreamingIT.java b/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4StreamingIT.java index c564e289e3f88..6f3895fffa437 100644 --- a/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4StreamingIT.java +++ b/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4StreamingIT.java @@ -44,7 +44,7 @@ public void tearDown() throws Exception { super.tearDown(); } - public void testStreamingRequest() throws IOException { + public void testStreamingRequestNoBatching() throws IOException { final VirtualTimeScheduler scheduler = VirtualTimeScheduler.create(true); final Stream<String> stream = IntStream.range(1, 6) @@ -85,6 +85,167 @@ public void testStreamingRequest() throws IOException { assertThat(count, equalTo(5)); } + public void testStreamingRequestOneBatchBySize() throws IOException, InterruptedException { + final Stream<String> stream = IntStream.range(1, 6) + .mapToObj(id -> "{ \"index\": { \"_index\": \"test-streaming\", \"_id\": \"" + id + "\" } }\n" + "{ \"name\": \"josh\" }\n"); + + final Duration delay = Duration.ofMillis(1); + final StreamingRequest<ByteBuffer> streamingRequest = new StreamingRequest<>( + "POST", + "/_bulk/stream", + Flux.fromStream(stream).delayElements(delay).map(s -> ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8))) + ); + streamingRequest.addParameter("refresh", "true"); + streamingRequest.addParameter("batch_size", "5"); + + final StreamingResponse<ByteBuffer> streamingResponse = client().streamRequest(streamingRequest); + + StepVerifier.create(Flux.from(streamingResponse.getBody()).map(b -> new String(b.array(), StandardCharsets.UTF_8))) + .expectNextMatches( + s -> s.contains("\"result\":\"created\"") + && s.contains("\"_id\":\"1\"") + && s.contains("\"result\":\"created\"") + && s.contains("\"_id\":\"2\"") + && s.contains("\"result\":\"created\"") + && s.contains("\"_id\":\"3\"") + && s.contains("\"result\":\"created\"") + && s.contains("\"_id\":\"4\"") + && s.contains("\"result\":\"created\"") + && s.contains("\"_id\":\"5\"") + ) +
.expectComplete() + .verify(); + + assertThat(streamingResponse.getStatusLine().getStatusCode(), equalTo(200)); + assertThat(streamingResponse.getWarnings(), empty()); + + final Request request = new Request("GET", "/test-streaming/_count"); + final Response response = client().performRequest(request); + final ObjectPath objectPath = ObjectPath.createFromResponse(response); + final Integer count = objectPath.evaluate("count"); + assertThat(count, equalTo(5)); + } + + public void testStreamingRequestManyBatchesBySize() throws IOException { + final Stream<String> stream = IntStream.range(1, 6) + .mapToObj(id -> "{ \"index\": { \"_index\": \"test-streaming\", \"_id\": \"" + id + "\" } }\n" + "{ \"name\": \"josh\" }\n"); + + final Duration delay = Duration.ofMillis(1); + final StreamingRequest<ByteBuffer> streamingRequest = new StreamingRequest<>( + "POST", + "/_bulk/stream", + Flux.fromStream(stream).delayElements(delay).map(s -> ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8))) + ); + streamingRequest.addParameter("refresh", "true"); + streamingRequest.addParameter("batch_size", "3"); + + final StreamingResponse<ByteBuffer> streamingResponse = client().streamRequest(streamingRequest); + + StepVerifier.create(Flux.from(streamingResponse.getBody()).map(b -> new String(b.array(), StandardCharsets.UTF_8))) + .expectNextMatches( + s -> s.contains("\"result\":\"created\"") + && s.contains("\"_id\":\"1\"") + && s.contains("\"result\":\"created\"") + && s.contains("\"_id\":\"2\"") + && s.contains("\"result\":\"created\"") + && s.contains("\"_id\":\"3\"") + ) + .expectNextMatches( + s -> s.contains("\"result\":\"created\"") + && s.contains("\"_id\":\"4\"") + && s.contains("\"result\":\"created\"") + && s.contains("\"_id\":\"5\"") + ) + .expectComplete() + .verify(); + + assertThat(streamingResponse.getStatusLine().getStatusCode(), equalTo(200)); + assertThat(streamingResponse.getWarnings(), empty()); + + final Request request = new Request("GET", "/test-streaming/_count"); + final Response response = client().performRequest(request); + final ObjectPath objectPath = ObjectPath.createFromResponse(response); + final Integer count = objectPath.evaluate("count"); + assertThat(count, equalTo(5)); + } + + public void testStreamingRequestManyBatchesByInterval() throws IOException { + final Stream<String> stream = IntStream.range(1, 6) + .mapToObj(id -> "{ \"index\": { \"_index\": \"test-streaming\", \"_id\": \"" + id + "\" } }\n" + "{ \"name\": \"josh\" }\n"); + + final Duration delay = Duration.ofMillis(500); + final StreamingRequest<ByteBuffer> streamingRequest = new StreamingRequest<>( + "POST", + "/_bulk/stream", + Flux.fromStream(stream).delayElements(delay).map(s -> ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8))) + ); + streamingRequest.addParameter("refresh", "true"); + streamingRequest.addParameter("batch_interval", "5s"); + + final StreamingResponse<ByteBuffer> streamingResponse = client().streamRequest(streamingRequest); + + // We don't check for other documents here since those may appear in any of the chunks (it is very + // difficult to get the timing right). But at the end, the total number of documents is checked.
+ StepVerifier.create(Flux.from(streamingResponse.getBody()).map(b -> new String(b.array(), StandardCharsets.UTF_8))) + .expectNextMatches( + s -> s.contains("\"result\":\"created\"") + && s.contains("\"_id\":\"1\"") + && s.contains("\"result\":\"created\"") + && s.contains("\"_id\":\"2\"") + && s.contains("\"result\":\"created\"") + && s.contains("\"_id\":\"3\"") + && s.contains("\"result\":\"created\"") + && s.contains("\"_id\":\"4\"") + && s.contains("\"result\":\"created\"") + && s.contains("\"_id\":\"5\"") + ) + .expectComplete() + .verify(); + + assertThat(streamingResponse.getStatusLine().getStatusCode(), equalTo(200)); + assertThat(streamingResponse.getWarnings(), empty()); + + final Request request = new Request("GET", "/test-streaming/_count"); + final Response response = client().performRequest(request); + final ObjectPath objectPath = ObjectPath.createFromResponse(response); + final Integer count = objectPath.evaluate("count"); + assertThat(count, equalTo(5)); + } + + public void testStreamingRequestManyBatchesByIntervalAndSize() throws IOException { + final Stream<String> stream = IntStream.range(1, 6) + .mapToObj(id -> "{ \"index\": { \"_index\": \"test-streaming\", \"_id\": \"" + id + "\" } }\n" + "{ \"name\": \"josh\" }\n"); + + final Duration delay = Duration.ofSeconds(1); + final StreamingRequest<ByteBuffer> streamingRequest = new StreamingRequest<>( + "POST", + "/_bulk/stream", + Flux.fromStream(stream).delayElements(delay).map(s -> ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8))) + ); + streamingRequest.addParameter("refresh", "true"); + streamingRequest.addParameter("batch_interval", "3s"); + streamingRequest.addParameter("batch_size", "5"); + + final StreamingResponse<ByteBuffer> streamingResponse = client().streamRequest(streamingRequest); + + // We don't check for other documents here since those may appear in any of the chunks (it is very + // difficult to get the timing right). But at the end, the total number of documents is checked.
+ StepVerifier.create(Flux.from(streamingResponse.getBody()).map(b -> new String(b.array(), StandardCharsets.UTF_8))) + .expectNextMatches(s -> s.contains("\"result\":\"created\"") && s.contains("\"_id\":\"1\"")) + .expectNextMatches(s -> s.contains("\"result\":\"created\"") && s.contains("\"_id\":\"5\"")) + .expectComplete() + .verify(); + + assertThat(streamingResponse.getStatusLine().getStatusCode(), equalTo(200)); + assertThat(streamingResponse.getWarnings(), empty()); + + final Request request = new Request("GET", "/test-streaming/_count"); + final Response response = client().performRequest(request); + final ObjectPath objectPath = ObjectPath.createFromResponse(response); + final Integer count = objectPath.evaluate("count"); + assertThat(count, equalTo(5)); + } + public void testStreamingBadRequest() throws IOException { final Stream<String> stream = Stream.of( "{ \"index\": { \"_index\": \"test-streaming\", \"_id\": \"1\" } }\n" + "{ \"name\": \"josh\" }\n" diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingHttpChannel.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingHttpChannel.java index 1aa03aa9967e2..12ed847c0c0de 100644 --- a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingHttpChannel.java +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingHttpChannel.java @@ -103,7 +103,6 @@ public void receiveChunk(HttpChunk message) { } } catch (final Exception ex) { producer.error(ex); - } finally { message.close(); } } diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingRequestConsumer.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingRequestConsumer.java index 8ed6710c8a1e3..282a82dc39fda 100644 --- a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingRequestConsumer.java +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingRequestConsumer.java @@ -44,7 +44,7 @@ public void subscribe(Subscriber<? super HttpChunk> s) { } HttpChunk createChunk(HttpContent chunk, boolean last) { - return new ReactorNetty4HttpChunk(chunk.copy().content(), last); + return new ReactorNetty4HttpChunk(chunk.copy().content().retain(), last); } StreamingHttpChannel httpChannel() { diff --git a/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransportStreamingTests.java b/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransportStreamingTests.java index a7bf71e58e9b6..df0e4027cc474 100644 --- a/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransportStreamingTests.java +++ b/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransportStreamingTests.java @@ -191,7 +191,7 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th } catch (final IOException ex) { throw new UncheckedIOException(ex); } - }).collect(Collectors.joining(""))); + }).collect(Collectors.joining("\r\n", "", "\r\n")))); } finally { response.release(); } diff --git
a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPlugin.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPlugin.java index 6b4496af76dc3..64f510fa1db67 100644 --- a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPlugin.java +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPlugin.java @@ -11,6 +11,7 @@ import org.opensearch.action.ActionRequest; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.common.inject.Module; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.IndexScopedSettings; import org.opensearch.common.settings.Setting; @@ -18,10 +19,13 @@ import org.opensearch.common.settings.SettingsFilter; import org.opensearch.core.action.ActionResponse; import org.opensearch.plugin.wlm.action.CreateQueryGroupAction; +import org.opensearch.plugin.wlm.action.DeleteQueryGroupAction; import org.opensearch.plugin.wlm.action.GetQueryGroupAction; import org.opensearch.plugin.wlm.action.TransportCreateQueryGroupAction; +import org.opensearch.plugin.wlm.action.TransportDeleteQueryGroupAction; import org.opensearch.plugin.wlm.action.TransportGetQueryGroupAction; import org.opensearch.plugin.wlm.rest.RestCreateQueryGroupAction; +import org.opensearch.plugin.wlm.rest.RestDeleteQueryGroupAction; import org.opensearch.plugin.wlm.rest.RestGetQueryGroupAction; import org.opensearch.plugin.wlm.service.QueryGroupPersistenceService; import org.opensearch.plugins.ActionPlugin; @@ -29,6 +33,7 @@ import org.opensearch.rest.RestController; import org.opensearch.rest.RestHandler; +import java.util.Collection; import java.util.List; import java.util.function.Supplier; @@ -46,7 +51,8 @@ public WorkloadManagementPlugin() {} public List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> getActions() { return List.of( new ActionPlugin.ActionHandler<>(CreateQueryGroupAction.INSTANCE, TransportCreateQueryGroupAction.class), - new ActionPlugin.ActionHandler<>(GetQueryGroupAction.INSTANCE, TransportGetQueryGroupAction.class) + new ActionPlugin.ActionHandler<>(GetQueryGroupAction.INSTANCE, TransportGetQueryGroupAction.class), + new ActionPlugin.ActionHandler<>(DeleteQueryGroupAction.INSTANCE, TransportDeleteQueryGroupAction.class) ); } @@ -60,11 +66,16 @@ public List<RestHandler> getRestHandlers( IndexNameExpressionResolver indexNameExpressionResolver, Supplier<DiscoveryNodes> nodesInCluster ) { - return List.of(new RestCreateQueryGroupAction(), new RestGetQueryGroupAction()); + return List.of(new RestCreateQueryGroupAction(), new RestGetQueryGroupAction(), new RestDeleteQueryGroupAction()); } @Override public List<Setting<?>> getSettings() { return List.of(QueryGroupPersistenceService.MAX_QUERY_GROUP_COUNT); } + + @Override + public Collection<Module> createGuiceModules() { + return List.of(new WorkloadManagementPluginModule()); + } } diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPluginModule.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPluginModule.java new file mode 100644 index 0000000000000..b7c7805639eb2 --- /dev/null +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPluginModule.java @@ -0,0 +1,31 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + *
compatible open source license. + */ + +package org.opensearch.plugin.wlm; + +import org.opensearch.common.inject.AbstractModule; +import org.opensearch.common.inject.Singleton; +import org.opensearch.plugin.wlm.service.QueryGroupPersistenceService; + +/** + * Guice Module to manage WorkloadManagement related objects + */ +public class WorkloadManagementPluginModule extends AbstractModule { + + /** + * Constructor for WorkloadManagementPluginModule + */ + public WorkloadManagementPluginModule() {} + + @Override + protected void configure() { + // Bind QueryGroupPersistenceService as a singleton to ensure a single instance is used, + // preventing multiple throttling key registrations in the constructor. + bind(QueryGroupPersistenceService.class).in(Singleton.class); + } +} diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupAction.java new file mode 100644 index 0000000000000..c78952a2f89ad --- /dev/null +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupAction.java @@ -0,0 +1,37 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.wlm.action; + +import org.opensearch.action.ActionType; +import org.opensearch.action.support.master.AcknowledgedResponse; + +/** + * Action type for delete QueryGroup + * + * @opensearch.experimental + */ +public class DeleteQueryGroupAction extends ActionType<AcknowledgedResponse> { + + /** + * An instance of DeleteQueryGroupAction + */ + public static final DeleteQueryGroupAction INSTANCE = new DeleteQueryGroupAction(); + + /** + * Name for DeleteQueryGroupAction + */ + public static final String NAME = "cluster:admin/opensearch/wlm/query_group/_delete"; + + /** + * Default constructor + */ + private DeleteQueryGroupAction() { + super(NAME, AcknowledgedResponse::new); + } +} diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupRequest.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupRequest.java new file mode 100644 index 0000000000000..e514943c2c7e9 --- /dev/null +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupRequest.java @@ -0,0 +1,65 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.plugin.wlm.action; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; + +import java.io.IOException; + +/** + * Request for delete QueryGroup + * + * @opensearch.experimental + */ +public class DeleteQueryGroupRequest extends AcknowledgedRequest<DeleteQueryGroupRequest> { + private final String name; + + /** + * Constructor for DeleteQueryGroupRequest + * @param name - name for the QueryGroup to delete + */ + public DeleteQueryGroupRequest(String name) { + this.name = name; + } + + /** + * Constructor for DeleteQueryGroupRequest + * @param in - A {@link StreamInput} object + */ + public DeleteQueryGroupRequest(StreamInput in) throws IOException { + super(in); + name = in.readOptionalString(); + } + + @Override + public ActionRequestValidationException validate() { + if (name == null) { + ActionRequestValidationException actionRequestValidationException = new ActionRequestValidationException(); + actionRequestValidationException.addValidationError("QueryGroup name is missing"); + return actionRequestValidationException; + } + return null; + } + + /** + * Name getter + */ + public String getName() { + return name; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeOptionalString(name); + } +} diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportCreateQueryGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportCreateQueryGroupAction.java index 01aa8cfb5e610..190ff17261bb4 100644 --- a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportCreateQueryGroupAction.java +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportCreateQueryGroupAction.java @@ -14,7 +14,6 @@ import org.opensearch.core.action.ActionListener; import org.opensearch.plugin.wlm.service.QueryGroupPersistenceService; import org.opensearch.tasks.Task; -import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; /** @@ -24,7 +23,6 @@ */ public class TransportCreateQueryGroupAction extends HandledTransportAction<CreateQueryGroupRequest, CreateQueryGroupResponse> { - private final ThreadPool threadPool; private final QueryGroupPersistenceService queryGroupPersistenceService; /** @@ -33,7 +31,6 @@ public class TransportCreateQueryGroupAction extends HandledTransportAction listener) { - threadPool.executor(ThreadPool.Names.SAME) - .execute(() -> queryGroupPersistenceService.persistInClusterStateMetadata(request.getQueryGroup(), listener)); + queryGroupPersistenceService.persistInClusterStateMetadata(request.getQueryGroup(), listener); } } diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupAction.java new file mode 100644 index 0000000000000..e4d3908d4a208 --- /dev/null +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupAction.java @@ -0,0 +1,91 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.plugin.wlm.action; + +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; +import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.block.ClusterBlockLevel; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.plugin.wlm.service.QueryGroupPersistenceService; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.io.IOException; + +/** + * Transport action for delete QueryGroup + * + * @opensearch.experimental + */ +public class TransportDeleteQueryGroupAction extends TransportClusterManagerNodeAction<DeleteQueryGroupRequest, AcknowledgedResponse> { + + private final QueryGroupPersistenceService queryGroupPersistenceService; + + /** + * Constructor for TransportDeleteQueryGroupAction + * + * @param clusterService - a {@link ClusterService} object + * @param transportService - a {@link TransportService} object + * @param actionFilters - a {@link ActionFilters} object + * @param threadPool - a {@link ThreadPool} object + * @param indexNameExpressionResolver - a {@link IndexNameExpressionResolver} object + * @param queryGroupPersistenceService - a {@link QueryGroupPersistenceService} object + */ + @Inject + public TransportDeleteQueryGroupAction( + ClusterService clusterService, + TransportService transportService, + ActionFilters actionFilters, + ThreadPool threadPool, + IndexNameExpressionResolver indexNameExpressionResolver, + QueryGroupPersistenceService queryGroupPersistenceService + ) { + super( + DeleteQueryGroupAction.NAME, + transportService, + clusterService, + threadPool, + actionFilters, + DeleteQueryGroupRequest::new, + indexNameExpressionResolver + ); + this.queryGroupPersistenceService = queryGroupPersistenceService; + } + + @Override + protected void clusterManagerOperation( + DeleteQueryGroupRequest request, + ClusterState state, + ActionListener<AcknowledgedResponse> listener + ) throws Exception { + queryGroupPersistenceService.deleteInClusterStateMetadata(request, listener); + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected AcknowledgedResponse read(StreamInput in) throws IOException { + return new AcknowledgedResponse(in); + } + + @Override + protected ClusterBlockException checkBlock(DeleteQueryGroupRequest request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } +} diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestDeleteQueryGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestDeleteQueryGroupAction.java new file mode 100644 index 0000000000000..8ad621cf8a1e4 --- /dev/null +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestDeleteQueryGroupAction.java @@ -0,0 +1,57 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.plugin.wlm.rest; + +import org.opensearch.client.node.NodeClient; +import org.opensearch.plugin.wlm.action.DeleteQueryGroupAction; +import org.opensearch.plugin.wlm.action.DeleteQueryGroupRequest; +import org.opensearch.rest.BaseRestHandler; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.RestToXContentListener; + +import java.io.IOException; +import java.util.List; + +import static org.opensearch.rest.RestRequest.Method.DELETE; + +/** + * Rest action to delete a QueryGroup + * + * @opensearch.experimental + */ +public class RestDeleteQueryGroupAction extends BaseRestHandler { + + /** + * Constructor for RestDeleteQueryGroupAction + */ + public RestDeleteQueryGroupAction() {} + + @Override + public String getName() { + return "delete_query_group"; + } + + /** + * The list of {@link Route}s that this RestHandler is responsible for handling. + */ + @Override + public List routes() { + return List.of(new Route(DELETE, "_wlm/query_group/{name}")); + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + DeleteQueryGroupRequest deleteQueryGroupRequest = new DeleteQueryGroupRequest(request.param("name")); + deleteQueryGroupRequest.clusterManagerNodeTimeout( + request.paramAsTime("cluster_manager_timeout", deleteQueryGroupRequest.clusterManagerNodeTimeout()) + ); + deleteQueryGroupRequest.timeout(request.paramAsTime("timeout", deleteQueryGroupRequest.timeout())); + return channel -> client.execute(DeleteQueryGroupAction.INSTANCE, deleteQueryGroupRequest, new RestToXContentListener<>(channel)); + } +} diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceService.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceService.java index fe7080da78bbe..ba5161a2c855e 100644 --- a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceService.java +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceService.java @@ -10,10 +10,14 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.opensearch.ResourceNotFoundException; +import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.cluster.AckedClusterStateUpdateTask; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateUpdateTask; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.metadata.QueryGroup; +import org.opensearch.cluster.service.ClusterManagerTaskThrottler; import org.opensearch.cluster.service.ClusterManagerTaskThrottler.ThrottlingKey; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Priority; @@ -24,6 +28,7 @@ import org.opensearch.core.action.ActionListener; import org.opensearch.core.rest.RestStatus; import org.opensearch.plugin.wlm.action.CreateQueryGroupResponse; +import org.opensearch.plugin.wlm.action.DeleteQueryGroupRequest; import org.opensearch.search.ResourceType; import java.util.Collection; @@ -38,6 +43,7 @@ public class QueryGroupPersistenceService { static final String SOURCE = "query-group-persistence-service"; private static final String CREATE_QUERY_GROUP_THROTTLING_KEY = "create-query-group"; + private static final String DELETE_QUERY_GROUP_THROTTLING_KEY = "delete-query-group"; private static final Logger logger = 
LogManager.getLogger(QueryGroupPersistenceService.class); /** * max QueryGroup count setting name @@ -65,6 +71,7 @@ public class QueryGroupPersistenceService { private final ClusterService clusterService; private volatile int maxQueryGroupCount; final ThrottlingKey createQueryGroupThrottlingKey; + final ThrottlingKey deleteQueryGroupThrottlingKey; /** * Constructor for QueryGroupPersistenceService @@ -81,6 +88,7 @@ public QueryGroupPersistenceService( ) { this.clusterService = clusterService; this.createQueryGroupThrottlingKey = clusterService.registerClusterManagerTask(CREATE_QUERY_GROUP_THROTTLING_KEY, true); + this.deleteQueryGroupThrottlingKey = clusterService.registerClusterManagerTask(DELETE_QUERY_GROUP_THROTTLING_KEY, true); setMaxQueryGroupCount(MAX_QUERY_GROUP_COUNT.get(settings)); clusterSettings.addSettingsUpdateConsumer(MAX_QUERY_GROUP_COUNT, this::setMaxQueryGroupCount); } @@ -212,6 +220,50 @@ public static Collection getFromClusterStateMetadata(String name, Cl .collect(Collectors.toList()); } + /** + * Modify cluster state to delete the QueryGroup + * @param deleteQueryGroupRequest - request to delete a QueryGroup + * @param listener - ActionListener for AcknowledgedResponse + */ + public void deleteInClusterStateMetadata( + DeleteQueryGroupRequest deleteQueryGroupRequest, + ActionListener listener + ) { + clusterService.submitStateUpdateTask(SOURCE, new AckedClusterStateUpdateTask<>(deleteQueryGroupRequest, listener) { + @Override + public ClusterState execute(ClusterState currentState) { + return deleteQueryGroupInClusterState(deleteQueryGroupRequest.getName(), currentState); + } + + @Override + public ClusterManagerTaskThrottler.ThrottlingKey getClusterManagerThrottlingKey() { + return deleteQueryGroupThrottlingKey; + } + + @Override + protected AcknowledgedResponse newResponse(boolean acknowledged) { + return new AcknowledgedResponse(acknowledged); + } + }); + } + + /** + * Modify cluster state to delete the QueryGroup, and return the new cluster state + * @param name - the name for QueryGroup to be deleted + * @param currentClusterState - current cluster state + */ + ClusterState deleteQueryGroupInClusterState(final String name, final ClusterState currentClusterState) { + final Metadata metadata = currentClusterState.metadata(); + final QueryGroup queryGroupToRemove = metadata.queryGroups() + .values() + .stream() + .filter(queryGroup -> queryGroup.getName().equals(name)) + .findAny() + .orElseThrow(() -> new ResourceNotFoundException("No QueryGroup exists with the provided name: " + name)); + + return ClusterState.builder(currentClusterState).metadata(Metadata.builder(metadata).remove(queryGroupToRemove).build()).build(); + } + /** * maxQueryGroupCount getter */ diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateQueryGroupResponseTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateQueryGroupResponseTests.java index 038f015713c5b..ecb9a6b2dc0d2 100644 --- a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateQueryGroupResponseTests.java +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateQueryGroupResponseTests.java @@ -27,7 +27,7 @@ public class CreateQueryGroupResponseTests extends OpenSearchTestCase { /** - * Test case to verify the serialization and deserialization of CreateQueryGroupResponse. + * Test case to verify serialization and deserialization of CreateQueryGroupResponse. 
*/ public void testSerialization() throws IOException { CreateQueryGroupResponse response = new CreateQueryGroupResponse(QueryGroupTestUtils.queryGroupOne, RestStatus.OK); @@ -46,7 +46,7 @@ public void testSerialization() throws IOException { } /** - * Test case to verify the toXContent method of CreateQueryGroupResponse. + * Test case to validate the toXContent method of CreateQueryGroupResponse. */ public void testToXContentCreateQueryGroup() throws IOException { XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint(); diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupRequestTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupRequestTests.java new file mode 100644 index 0000000000000..bc2e4f0faca4c --- /dev/null +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupRequestTests.java @@ -0,0 +1,42 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.wlm.action; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.plugin.wlm.QueryGroupTestUtils; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; + +public class DeleteQueryGroupRequestTests extends OpenSearchTestCase { + + /** + * Test case to verify the serialization and deserialization of DeleteQueryGroupRequest. + */ + public void testSerialization() throws IOException { + DeleteQueryGroupRequest request = new DeleteQueryGroupRequest(QueryGroupTestUtils.NAME_ONE); + assertEquals(QueryGroupTestUtils.NAME_ONE, request.getName()); + BytesStreamOutput out = new BytesStreamOutput(); + request.writeTo(out); + StreamInput streamInput = out.bytes().streamInput(); + DeleteQueryGroupRequest otherRequest = new DeleteQueryGroupRequest(streamInput); + assertEquals(request.getName(), otherRequest.getName()); + } + + /** + * Test case to verify that validation fails when the QueryGroup name is null. + */ + public void testSerializationWithNull() throws IOException { + DeleteQueryGroupRequest request = new DeleteQueryGroupRequest((String) null); + ActionRequestValidationException actionRequestValidationException = request.validate(); + assertFalse(actionRequestValidationException.getMessage().isEmpty()); + } +} diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupActionTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupActionTests.java new file mode 100644 index 0000000000000..253d65f8da80f --- /dev/null +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupActionTests.java @@ -0,0 +1,63 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.plugin.wlm.action; + +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.core.action.ActionListener; +import org.opensearch.plugin.wlm.service.QueryGroupPersistenceService; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; + +public class TransportDeleteQueryGroupActionTests extends OpenSearchTestCase { + + ClusterService clusterService = mock(ClusterService.class); + TransportService transportService = mock(TransportService.class); + ActionFilters actionFilters = mock(ActionFilters.class); + ThreadPool threadPool = mock(ThreadPool.class); + IndexNameExpressionResolver indexNameExpressionResolver = mock(IndexNameExpressionResolver.class); + QueryGroupPersistenceService queryGroupPersistenceService = mock(QueryGroupPersistenceService.class); + + TransportDeleteQueryGroupAction action = new TransportDeleteQueryGroupAction( + clusterService, + transportService, + actionFilters, + threadPool, + indexNameExpressionResolver, + queryGroupPersistenceService + ); + + /** + * Test case to validate the construction for TransportDeleteQueryGroupAction + */ + public void testConstruction() { + assertNotNull(action); + assertEquals(ThreadPool.Names.SAME, action.executor()); + } + + /** + * Test case to validate the clusterManagerOperation function in TransportDeleteQueryGroupAction + */ + public void testClusterManagerOperation() throws Exception { + DeleteQueryGroupRequest request = new DeleteQueryGroupRequest("testGroup"); + @SuppressWarnings("unchecked") + ActionListener listener = mock(ActionListener.class); + ClusterState clusterState = mock(ClusterState.class); + action.clusterManagerOperation(request, clusterState, listener); + verify(queryGroupPersistenceService).deleteInClusterStateMetadata(eq(request), eq(listener)); + } +} diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rest/RestDeleteQueryGroupActionTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rest/RestDeleteQueryGroupActionTests.java new file mode 100644 index 0000000000000..72191e076bb87 --- /dev/null +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rest/RestDeleteQueryGroupActionTests.java @@ -0,0 +1,85 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.wlm.rest; + +import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.client.node.NodeClient; +import org.opensearch.common.CheckedConsumer; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.plugin.wlm.action.DeleteQueryGroupAction; +import org.opensearch.plugin.wlm.action.DeleteQueryGroupRequest; +import org.opensearch.rest.RestChannel; +import org.opensearch.rest.RestHandler; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.RestToXContentListener; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.rest.FakeRestRequest; + +import java.util.List; + +import org.mockito.ArgumentCaptor; + +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.NAME_ONE; +import static org.opensearch.rest.RestRequest.Method.DELETE; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; + +public class RestDeleteQueryGroupActionTests extends OpenSearchTestCase { + /** + * Test case to validate the construction for RestDeleteQueryGroupAction + */ + public void testConstruction() { + RestDeleteQueryGroupAction action = new RestDeleteQueryGroupAction(); + assertNotNull(action); + assertEquals("delete_query_group", action.getName()); + List<RestHandler.Route> routes = action.routes(); + assertEquals(1, routes.size()); + RestHandler.Route route = routes.get(0); + assertEquals(DELETE, route.getMethod()); + assertEquals("_wlm/query_group/{name}", route.getPath()); + } + + /** + * Test case to validate the prepareRequest logic for RestDeleteQueryGroupAction + */ + @SuppressWarnings("unchecked") + public void testPrepareRequest() throws Exception { + RestDeleteQueryGroupAction restDeleteQueryGroupAction = new RestDeleteQueryGroupAction(); + NodeClient nodeClient = mock(NodeClient.class); + RestRequest realRequest = new FakeRestRequest(); + realRequest.params().put("name", NAME_ONE); + RestRequest spyRequest = spy(realRequest); + + doReturn(TimeValue.timeValueSeconds(30)).when(spyRequest).paramAsTime(eq("cluster_manager_timeout"), any(TimeValue.class)); + doReturn(TimeValue.timeValueSeconds(60)).when(spyRequest).paramAsTime(eq("timeout"), any(TimeValue.class)); + + CheckedConsumer consumer = restDeleteQueryGroupAction.prepareRequest(spyRequest, nodeClient); + assertNotNull(consumer); + ArgumentCaptor<DeleteQueryGroupRequest> requestCaptor = ArgumentCaptor.forClass(DeleteQueryGroupRequest.class); + ArgumentCaptor<RestToXContentListener<AcknowledgedResponse>> listenerCaptor = ArgumentCaptor.forClass(RestToXContentListener.class); + doNothing().when(nodeClient).execute(eq(DeleteQueryGroupAction.INSTANCE), requestCaptor.capture(), listenerCaptor.capture()); + + consumer.accept(mock(RestChannel.class)); + DeleteQueryGroupRequest capturedRequest = requestCaptor.getValue(); + assertEquals(NAME_ONE, capturedRequest.getName()); + assertEquals(TimeValue.timeValueSeconds(30), capturedRequest.clusterManagerNodeTimeout()); + assertEquals(TimeValue.timeValueSeconds(60), capturedRequest.timeout()); + verify(nodeClient).execute( + eq(DeleteQueryGroupAction.INSTANCE), + any(DeleteQueryGroupRequest.class), + any(RestToXContentListener.class) + ); + } +} diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceServiceTests.java
b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceServiceTests.java index 2aa3b9e168852..a516ffdde839e 100644 --- a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceServiceTests.java +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceServiceTests.java @@ -8,6 +8,9 @@ package org.opensearch.plugin.wlm.service; +import org.opensearch.ResourceNotFoundException; +import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.cluster.AckedClusterStateUpdateTask; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateUpdateTask; @@ -20,6 +23,7 @@ import org.opensearch.core.action.ActionListener; import org.opensearch.plugin.wlm.QueryGroupTestUtils; import org.opensearch.plugin.wlm.action.CreateQueryGroupResponse; +import org.opensearch.plugin.wlm.action.DeleteQueryGroupRequest; import org.opensearch.search.ResourceType; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.ThreadPool; @@ -39,6 +43,7 @@ import static org.opensearch.plugin.wlm.QueryGroupTestUtils.MONITOR_STRING; import static org.opensearch.plugin.wlm.QueryGroupTestUtils.NAME_NONE_EXISTED; import static org.opensearch.plugin.wlm.QueryGroupTestUtils.NAME_ONE; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.NAME_TWO; import static org.opensearch.plugin.wlm.QueryGroupTestUtils._ID_ONE; import static org.opensearch.plugin.wlm.QueryGroupTestUtils._ID_TWO; import static org.opensearch.plugin.wlm.QueryGroupTestUtils.assertEqualQueryGroups; @@ -48,12 +53,14 @@ import static org.opensearch.plugin.wlm.QueryGroupTestUtils.preparePersistenceServiceSetup; import static org.opensearch.plugin.wlm.QueryGroupTestUtils.queryGroupList; import static org.opensearch.plugin.wlm.QueryGroupTestUtils.queryGroupOne; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.queryGroupPersistenceService; import static org.opensearch.plugin.wlm.QueryGroupTestUtils.queryGroupTwo; import static org.opensearch.plugin.wlm.service.QueryGroupPersistenceService.QUERY_GROUP_COUNT_SETTING_NAME; import static org.opensearch.plugin.wlm.service.QueryGroupPersistenceService.SOURCE; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.anyString; +import static org.mockito.Mockito.argThat; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; @@ -298,4 +305,55 @@ public void testMaxQueryGroupCount() { queryGroupPersistenceService.setMaxQueryGroupCount(50); assertEquals(50, queryGroupPersistenceService.getMaxQueryGroupCount()); } + + /** + * Tests deleting a single QueryGroup + */ + public void testDeleteSingleQueryGroup() { + ClusterState newClusterState = queryGroupPersistenceService().deleteQueryGroupInClusterState(NAME_TWO, clusterState()); + Map<String, QueryGroup> afterDeletionGroups = newClusterState.getMetadata().queryGroups(); + assertFalse(afterDeletionGroups.containsKey(_ID_TWO)); + assertEquals(1, afterDeletionGroups.size()); + List<QueryGroup> oldQueryGroups = new ArrayList<>(); + oldQueryGroups.add(queryGroupOne); + assertEqualQueryGroups(new ArrayList<>(afterDeletionGroups.values()), oldQueryGroups); + } + + /** + * Tests deleting a QueryGroup with an invalid name + */ + public void testDeleteNonExistedQueryGroup() { + assertThrows( + ResourceNotFoundException.class,
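+            // a name that matches no QueryGroup in cluster state must surface ResourceNotFoundException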
() -> queryGroupPersistenceService().deleteQueryGroupInClusterState(NAME_NONE_EXISTED, clusterState()) + ); + } + + /** + * Tests DeleteInClusterStateMetadata function + */ + @SuppressWarnings("unchecked") + public void testDeleteInClusterStateMetadata() throws Exception { + DeleteQueryGroupRequest request = new DeleteQueryGroupRequest(NAME_ONE); + ClusterService clusterService = mock(ClusterService.class); + + ActionListener listener = mock(ActionListener.class); + QueryGroupPersistenceService queryGroupPersistenceService = new QueryGroupPersistenceService( + clusterService, + QueryGroupTestUtils.settings(), + clusterSettings() + ); + doAnswer(invocation -> { + AckedClusterStateUpdateTask task = invocation.getArgument(1); + ClusterState initialState = clusterState(); + ClusterState newState = task.execute(initialState); + assertNotNull(newState); + assertEquals(queryGroupPersistenceService.deleteQueryGroupThrottlingKey, task.getClusterManagerThrottlingKey()); + task.onAllNodesAcked(null); + verify(listener).onResponse(argThat(response -> response.isAcknowledged())); + return null; + }).when(clusterService).submitStateUpdateTask(anyString(), any()); + queryGroupPersistenceService.deleteInClusterStateMetadata(request, listener); + verify(clusterService).submitStateUpdateTask(eq(SOURCE), any(AckedClusterStateUpdateTask.class)); + } } diff --git a/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/delete_query_group_context.json b/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/delete_query_group_context.json new file mode 100644 index 0000000000000..16930427fc2fe --- /dev/null +++ b/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/delete_query_group_context.json @@ -0,0 +1,22 @@ +{ + "delete_query_group_context": { + "stability": "experimental", + "url": { + "paths": [ + { + "path": "/_wlm/query_group/{name}", + "methods": [ + "DELETE" + ], + "parts": { + "name": { + "type": "string", + "description": "QueryGroup name" + } + } + } + ] + }, + "params":{} + } +} diff --git a/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/test/wlm/10_query_group.yml b/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/test/wlm/10_query_group.yml index a22dfa2f4477e..a00314986a5cf 100644 --- a/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/test/wlm/10_query_group.yml +++ b/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/test/wlm/10_query_group.yml @@ -106,3 +106,9 @@ - match: { query_groups.0.resiliency_mode: "monitor" } - match: { query_groups.0.resource_limits.cpu: 0.35 } - match: { query_groups.0.resource_limits.memory: 0.25 } + + - do: + delete_query_group_context: + name: "analytics2" + + - match: { acknowledged: true } diff --git a/release-notes/opensearch.release-notes-1.3.19.md b/release-notes/opensearch.release-notes-1.3.19.md new file mode 100644 index 0000000000000..fe62624fc6362 --- /dev/null +++ b/release-notes/opensearch.release-notes-1.3.19.md @@ -0,0 +1,5 @@ +## 2024-08-22 Version 1.3.19 Release Notes + +### Upgrades +- OpenJDK Update (July 2024 Patch releases) ([#15002](https://github.com/opensearch-project/OpenSearch/pull/15002)) +- Bump `netty` from 4.1.111.Final to 4.1.112.Final ([#15081](https://github.com/opensearch-project/OpenSearch/pull/15081)) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/100_partial_flat_object.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/100_partial_flat_object.yml index 
91e4127da9c32..e1bc86f1c9f3d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/100_partial_flat_object.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/100_partial_flat_object.yml @@ -88,7 +88,16 @@ setup: } ] } } - + - do: + index: + index: test_partial_flat_object + id: 4 + body: { + "issue": { + "number": 999, + "labels": null + } + } - do: indices.refresh: index: test_partial_flat_object @@ -135,7 +144,7 @@ teardown: } } - - length: { hits.hits: 3 } + - length: { hits.hits: 4 } # Match Query with exact dot path. - do: diff --git a/server/build.gradle b/server/build.gradle index d655796674001..0cc42ad690eab 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -127,7 +127,7 @@ dependencies { api "jakarta.annotation:jakarta.annotation-api:${versions.jakarta_annotation}" // https://mvnrepository.com/artifact/org.roaringbitmap/RoaringBitmap - implementation 'org.roaringbitmap:RoaringBitmap:1.1.0' + implementation 'org.roaringbitmap:RoaringBitmap:1.2.1' testImplementation(project(":test:framework")) { // tests use the locally compiled version of server diff --git a/server/licenses/RoaringBitmap-1.1.0.jar.sha1 b/server/licenses/RoaringBitmap-1.1.0.jar.sha1 deleted file mode 100644 index bf34e11b92710..0000000000000 --- a/server/licenses/RoaringBitmap-1.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9607213861158ae7060234d93ee9c9cb19f494d1 \ No newline at end of file diff --git a/server/licenses/RoaringBitmap-1.2.1.jar.sha1 b/server/licenses/RoaringBitmap-1.2.1.jar.sha1 new file mode 100644 index 0000000000000..ef8cd48c7a388 --- /dev/null +++ b/server/licenses/RoaringBitmap-1.2.1.jar.sha1 @@ -0,0 +1 @@ +828eb489b5e8c8762f2471010e9c7f20c7de596d \ No newline at end of file diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteStatePublicationIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteStatePublicationIT.java index 07d6e1379ced8..6a2e7ce4957ae 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteStatePublicationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteStatePublicationIT.java @@ -50,15 +50,22 @@ public class RemoteStatePublicationIT extends RemoteStoreBaseIntegTestCase { private static String INDEX_NAME = "test-index"; + private boolean isRemoteStateEnabled = true; + private String isRemotePublicationEnabled = "true"; @Before public void setup() { asyncUploadMockFsRepo = false; + isRemoteStateEnabled = true; + isRemotePublicationEnabled = "true"; } @Override protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.REMOTE_PUBLICATION_EXPERIMENTAL, "true").build(); + return Settings.builder() + .put(super.featureFlagSettings()) + .put(FeatureFlags.REMOTE_PUBLICATION_EXPERIMENTAL, isRemotePublicationEnabled) + .build(); } @Override @@ -76,7 +83,7 @@ protected Settings nodeSettings(int nodeOrdinal) { ); return Settings.builder() .put(super.nodeSettings(nodeOrdinal)) - .put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true) + .put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), isRemoteStateEnabled) .put("node.attr." 
+ REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY, routingTableRepoName) .put(routingTableRepoTypeAttributeKey, ReloadableFsRepository.TYPE) .put(routingTableRepoSettingsAttributeKeyPrefix + "location", segmentRepoPath) @@ -136,6 +143,18 @@ public void testPublication() throws Exception { } } + public void testRemotePublicationDisableIfRemoteStateDisabled() { + // only disable remote state + isRemoteStateEnabled = false; + // create a multi-node cluster with inconsistent settings + prepareCluster(3, 2, INDEX_NAME, 1, 2); + // assert cluster is stable, ensuring publication falls back to legacy transport with inconsistent settings + ensureStableCluster(5); + ensureGreen(INDEX_NAME); + + assertNull(internalCluster().getCurrentClusterManagerNodeInstance(RemoteClusterStateService.class)); + } + private Map<String, Integer> getMetadataFiles(BlobStoreRepository repository, String subDirectory) throws IOException { BlobPath metadataPath = repository.basePath() .add( @@ -151,5 +170,4 @@ private Map getMetadataFiles(BlobStoreRepository repository, St return fileName.split(DELIMITER)[0]; }).collect(Collectors.toMap(Function.identity(), key -> 1, Integer::sum)); } - } diff --git a/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java b/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java index 6f5b4bba481dd..52c6c6801a3c2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java @@ -264,15 +264,16 @@ public void testValidCompositeIndex() { ); assertEquals(expectedTimeUnits, dateDim.getIntervals()); assertEquals("numeric_dv", starTreeFieldType.getDimensions().get(1).getField()); + assertEquals(2, starTreeFieldType.getMetrics().size()); assertEquals("numeric_dv", starTreeFieldType.getMetrics().get(0).getField()); - List<MetricStat> expectedMetrics = Arrays.asList( - MetricStat.AVG, - MetricStat.VALUE_COUNT, - MetricStat.SUM, - MetricStat.MAX, - MetricStat.MIN - ); + + // Assert default metrics + List<MetricStat> expectedMetrics = Arrays.asList(MetricStat.VALUE_COUNT, MetricStat.SUM, MetricStat.AVG); assertEquals(expectedMetrics, starTreeFieldType.getMetrics().get(0).getMetrics()); + + assertEquals("_doc_count", starTreeFieldType.getMetrics().get(1).getField()); + assertEquals(List.of(MetricStat.DOC_COUNT), starTreeFieldType.getMetrics().get(1).getMetrics()); + assertEquals(10000, starTreeFieldType.getStarTreeConfig().maxLeafDocs()); assertEquals( StarTreeFieldConfiguration.StarTreeBuildMode.OFF_HEAP, @@ -349,13 +350,9 @@ public void testUpdateIndexWhenMappingIsSame() { assertEquals(expectedTimeUnits, dateDim.getIntervals()); assertEquals("numeric_dv", starTreeFieldType.getDimensions().get(1).getField()); assertEquals("numeric_dv", starTreeFieldType.getMetrics().get(0).getField()); - List<MetricStat> expectedMetrics = Arrays.asList( - MetricStat.AVG, - MetricStat.VALUE_COUNT, - MetricStat.SUM, - MetricStat.MAX, - MetricStat.MIN - ); + + // Assert default metrics + List<MetricStat> expectedMetrics = Arrays.asList(MetricStat.VALUE_COUNT, MetricStat.SUM, MetricStat.AVG); assertEquals(expectedMetrics, starTreeFieldType.getMetrics().get(0).getMetrics()); assertEquals(10000, starTreeFieldType.getStarTreeConfig().maxLeafDocs()); assertEquals( diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java index 09d5c208a8756..108ef14f0fcb4 100644
--- a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java @@ -34,6 +34,12 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.Weight; import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.action.admin.cluster.node.stats.NodeStats; import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; @@ -56,7 +62,10 @@ import org.opensearch.env.NodeEnvironment; import org.opensearch.index.IndexSettings; import org.opensearch.index.cache.request.RequestCacheStats; +import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryBuilders; +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.index.query.TermQueryBuilder; import org.opensearch.search.aggregations.bucket.global.GlobalAggregationBuilder; import org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.opensearch.search.aggregations.bucket.histogram.Histogram; @@ -65,6 +74,7 @@ import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.hamcrest.OpenSearchAssertions; +import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.time.ZoneId; @@ -768,6 +778,59 @@ public void testDeleteAndCreateSameIndexShardOnSameNode() throws Exception { assertTrue(stats.getMemorySizeInBytes() == 0); } + public void testTimedOutQuery() throws Exception { + // A timed out query should be cached and then invalidated + Client client = client(); + String index = "index"; + assertAcked( + client.admin() + .indices() + .prepareCreate(index) + .setMapping("k", "type=keyword") + .setSettings( + Settings.builder() + .put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + // Disable index refreshing to avoid cache being invalidated mid-test + .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), TimeValue.timeValueMillis(-1)) + ) + .get() + ); + indexRandom(true, client.prepareIndex(index).setSource("k", "hello")); + ensureSearchable(index); + // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache + forceMerge(client, index); + + QueryBuilder timeoutQueryBuilder = new TermQueryBuilder("k", "hello") { + @Override + protected Query doToQuery(QueryShardContext context) { + return new TermQuery(new Term("k", "hello")) { + @Override + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { + // Create the weight before sleeping. Otherwise, TermStates.build() (in the call to super.createWeight()) will + // sometimes throw an exception on timeout, rather than timing out gracefully. 
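+                        // The 500ms sleep below comfortably exceeds the TimeValue.ZERO timeout set on the search request, so the query reliably times out.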
+ Weight result = super.createWeight(searcher, scoreMode, boost); + try { + Thread.sleep(500); + } catch (InterruptedException ignored) {} + return result; + } + }; + } + }; + + SearchResponse resp = client.prepareSearch(index) + .setRequestCache(true) + .setQuery(timeoutQueryBuilder) + .setTimeout(TimeValue.ZERO) + .get(); + assertTrue(resp.isTimedOut()); + RequestCacheStats requestCacheStats = getRequestCacheStats(client, index); + // The cache should be empty as the timed-out query was invalidated + assertEquals(0, requestCacheStats.getMemorySizeInBytes()); + } + private Path[] shardDirectory(String server, Index index, int shard) { NodeEnvironment env = internalCluster().getInstance(NodeEnvironment.class, server); final Path[] paths = env.availableShardPaths(new ShardId(index, shard)); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java index f8e5079b01a36..dc0654c623137 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java @@ -10,6 +10,7 @@ import org.opensearch.action.DocWriteResponse; import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest; +import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.opensearch.action.admin.indices.delete.DeleteIndexRequest; import org.opensearch.action.delete.DeleteResponse; @@ -23,6 +24,7 @@ import org.opensearch.common.io.PathUtils; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.index.Index; import org.opensearch.core.rest.RestStatus; import org.opensearch.index.IndexService; @@ -31,7 +33,13 @@ import org.opensearch.index.remote.RemoteStoreEnums.PathType; import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.IndicesService; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.Repository; +import org.opensearch.repositories.RepositoryData; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.repositories.fs.FsRepository; import org.opensearch.snapshots.AbstractSnapshotIntegTestCase; import org.opensearch.snapshots.SnapshotInfo; import org.opensearch.snapshots.SnapshotRestoreException; @@ -46,6 +54,7 @@ import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Objects; @@ -63,6 +72,7 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteRestoreSnapshotIT extends AbstractSnapshotIntegTestCase { @@ -746,4 +756,596 @@ public void testInvalidRestoreRequestScenarios() throws Exception { assertTrue(exception.getMessage().contains("cannot remove setting 
[index.remote_store.segment.repository]" + " on restore")); } + public void testCreateSnapshotV2() throws Exception { + internalCluster().startClusterManagerOnlyNode(pinnedTimestampSettings()); + internalCluster().startDataOnlyNode(pinnedTimestampSettings()); + internalCluster().startDataOnlyNode(pinnedTimestampSettings()); + String indexName1 = "testindex1"; + String indexName2 = "testindex2"; + String indexName3 = "testindex3"; + String snapshotRepoName = "test-create-snapshot-repo"; + String snapshotName1 = "test-create-snapshot1"; + Path absolutePath1 = randomRepoPath().toAbsolutePath(); + logger.info("Snapshot Path [{}]", absolutePath1); + + assertAcked( + client().admin() + .cluster() + .preparePutRepository(snapshotRepoName) + .setType(FsRepository.TYPE) + .setSettings( + Settings.builder() + .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) + .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) + .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) + .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), true) + ) + ); + + Client client = client(); + Settings indexSettings = getIndexSettings(20, 0).build(); + createIndex(indexName1, indexSettings); + + Settings indexSettings2 = getIndexSettings(15, 0).build(); + createIndex(indexName2, indexSettings2); + + final int numDocsInIndex1 = 10; + final int numDocsInIndex2 = 20; + indexDocuments(client, indexName1, numDocsInIndex1); + indexDocuments(client, indexName2, numDocsInIndex2); + ensureGreen(indexName1, indexName2); + + SnapshotInfo snapshotInfo = createSnapshot(snapshotRepoName, snapshotName1, Collections.emptyList()); + assertThat(snapshotInfo.state(), equalTo(SnapshotState.SUCCESS)); + assertThat(snapshotInfo.successfulShards(), greaterThan(0)); + assertThat(snapshotInfo.successfulShards(), equalTo(snapshotInfo.totalShards())); + assertThat(snapshotInfo.getPinnedTimestamp(), greaterThan(0L)); + + indexDocuments(client, indexName1, 10); + indexDocuments(client, indexName2, 20); + + createIndex(indexName3, indexSettings); + indexDocuments(client, indexName3, 10); + + String snapshotName2 = "test-create-snapshot2"; + + // verify even if waitForCompletion is not true, the request executes in a sync manner + CreateSnapshotResponse createSnapshotResponse2 = client().admin() + .cluster() + .prepareCreateSnapshot(snapshotRepoName, snapshotName2) + .get(); + snapshotInfo = createSnapshotResponse2.getSnapshotInfo(); + assertThat(snapshotInfo.state(), equalTo(SnapshotState.SUCCESS)); + assertThat(snapshotInfo.successfulShards(), greaterThan(0)); + assertThat(snapshotInfo.successfulShards(), equalTo(snapshotInfo.totalShards())); + assertThat(snapshotInfo.snapshotId().getName(), equalTo(snapshotName2)); + assertThat(snapshotInfo.getPinnedTimestamp(), greaterThan(0L)); + + } + + public void testMixedSnapshotCreationWithV2RepositorySetting() throws Exception { + + internalCluster().startClusterManagerOnlyNode(pinnedTimestampSettings()); + internalCluster().startDataOnlyNode(pinnedTimestampSettings()); + internalCluster().startDataOnlyNode(pinnedTimestampSettings()); + String indexName1 = "testindex1"; + String indexName2 = "testindex2"; + String indexName3 = "testindex3"; + String snapshotRepoName = "test-create-snapshot-repo"; + String snapshotName1 = "test-create-snapshot-v1"; + Path absolutePath1 = randomRepoPath().toAbsolutePath(); + logger.info("Snapshot Path [{}]", absolutePath1); + + assertAcked( + 
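+            // register the repository with SHALLOW_SNAPSHOT_V2 disabled first, so the initial snapshot below takes the v1 path and pins no timestamp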
client().admin() + .cluster() + .preparePutRepository(snapshotRepoName) + .setType(FsRepository.TYPE) + .setSettings( + Settings.builder() + .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) + .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) + .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) + .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), false) + ) + ); + Client client = client(); + Settings indexSettings = getIndexSettings(20, 0).build(); + createIndex(indexName1, indexSettings); + + Settings indexSettings2 = getIndexSettings(15, 0).build(); + createIndex(indexName2, indexSettings2); + + final int numDocsInIndex1 = 10; + final int numDocsInIndex2 = 20; + indexDocuments(client, indexName1, numDocsInIndex1); + indexDocuments(client, indexName2, numDocsInIndex2); + ensureGreen(indexName1, indexName2); + + SnapshotInfo snapshotInfo = createSnapshot(snapshotRepoName, snapshotName1, Collections.emptyList()); + assertThat(snapshotInfo.state(), equalTo(SnapshotState.SUCCESS)); + assertThat(snapshotInfo.successfulShards(), greaterThan(0)); + assertThat(snapshotInfo.successfulShards(), equalTo(snapshotInfo.totalShards())); + assertThat(snapshotInfo.getPinnedTimestamp(), equalTo(0L)); + + // enable shallow_snapshot_v2 + assertAcked( + client().admin() + .cluster() + .preparePutRepository(snapshotRepoName) + .setType(FsRepository.TYPE) + .setSettings( + Settings.builder() + .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) + .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) + .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) + .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), true) + ) + ); + indexDocuments(client, indexName1, 10); + indexDocuments(client, indexName2, 20); + + createIndex(indexName3, indexSettings); + indexDocuments(client, indexName3, 10); + + String snapshotName2 = "test-create-snapshot-v2"; + + // verify even if waitForCompletion is not true, the request executes in a sync manner + CreateSnapshotResponse createSnapshotResponse2 = client().admin() + .cluster() + .prepareCreateSnapshot(snapshotRepoName, snapshotName2) + .get(); + snapshotInfo = createSnapshotResponse2.getSnapshotInfo(); + assertThat(snapshotInfo.state(), equalTo(SnapshotState.SUCCESS)); + assertThat(snapshotInfo.successfulShards(), greaterThan(0)); + assertThat(snapshotInfo.successfulShards(), equalTo(snapshotInfo.totalShards())); + assertThat(snapshotInfo.snapshotId().getName(), equalTo(snapshotName2)); + assertThat(snapshotInfo.getPinnedTimestamp(), greaterThan(0L)); + + } + + public void testConcurrentSnapshotV2CreateOperation() throws InterruptedException, ExecutionException { + internalCluster().startClusterManagerOnlyNode(pinnedTimestampSettings()); + internalCluster().startDataOnlyNode(pinnedTimestampSettings()); + internalCluster().startDataOnlyNode(pinnedTimestampSettings()); + String indexName1 = "testindex1"; + String indexName2 = "testindex2"; + String snapshotRepoName = "test-create-snapshot-repo"; + Path absolutePath1 = randomRepoPath().toAbsolutePath(); + logger.info("Snapshot Path [{}]", absolutePath1); + + assertAcked( + client().admin() + .cluster() + .preparePutRepository(snapshotRepoName) + .setType(FsRepository.TYPE) + .setSettings( + Settings.builder() + 
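+                    // v2-enabled repository shared by the five competing snapshot threads below; concurrent creates may be rejected, which the workers deliberately swallow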
.put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) + .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) + .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) + .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), true) + ) + ); + + Client client = client(); + Settings indexSettings = getIndexSettings(20, 0).build(); + createIndex(indexName1, indexSettings); + + Settings indexSettings2 = getIndexSettings(15, 0).build(); + createIndex(indexName2, indexSettings2); + + final int numDocsInIndex1 = 10; + final int numDocsInIndex2 = 20; + indexDocuments(client, indexName1, numDocsInIndex1); + indexDocuments(client, indexName2, numDocsInIndex2); + ensureGreen(indexName1, indexName2); + + int concurrentSnapshots = 5; + + // Prepare threads for concurrent snapshot creation + List threads = new ArrayList<>(); + + for (int i = 0; i < concurrentSnapshots; i++) { + int snapshotIndex = i; + Thread thread = new Thread(() -> { + try { + String snapshotName = "snapshot-concurrent-" + snapshotIndex; + CreateSnapshotResponse createSnapshotResponse2 = client().admin() + .cluster() + .prepareCreateSnapshot(snapshotRepoName, snapshotName) + .get(); + SnapshotInfo snapshotInfo = createSnapshotResponse2.getSnapshotInfo(); + assertThat(snapshotInfo.state(), equalTo(SnapshotState.SUCCESS)); + assertThat(snapshotInfo.successfulShards(), greaterThan(0)); + assertThat(snapshotInfo.successfulShards(), equalTo(snapshotInfo.totalShards())); + assertThat(snapshotInfo.snapshotId().getName(), equalTo(snapshotName)); + assertThat(snapshotInfo.getPinnedTimestamp(), greaterThan(0L)); + } catch (Exception e) {} + }); + threads.add(thread); + } + // start all threads + for (Thread thread : threads) { + thread.start(); + } + + // Wait for all threads to complete + for (Thread thread : threads) { + thread.join(); + } + + // Validate that only one snapshot has been created + Repository repository = internalCluster().getInstance(RepositoriesService.class).repository(snapshotRepoName); + PlainActionFuture repositoryDataPlainActionFuture = new PlainActionFuture<>(); + repository.getRepositoryData(repositoryDataPlainActionFuture); + + RepositoryData repositoryData = repositoryDataPlainActionFuture.get(); + assertThat(repositoryData.getSnapshotIds().size(), greaterThanOrEqualTo(1)); + } + + public void testCreateSnapshotV2WithRedIndex() throws Exception { + internalCluster().startClusterManagerOnlyNode(pinnedTimestampSettings()); + internalCluster().startDataOnlyNode(pinnedTimestampSettings()); + internalCluster().startDataOnlyNode(pinnedTimestampSettings()); + String indexName1 = "testindex1"; + String indexName2 = "testindex2"; + String snapshotRepoName = "test-create-snapshot-repo"; + String snapshotName1 = "test-create-snapshot1"; + Path absolutePath1 = randomRepoPath().toAbsolutePath(); + logger.info("Snapshot Path [{}]", absolutePath1); + + assertAcked( + client().admin() + .cluster() + .preparePutRepository(snapshotRepoName) + .setType(FsRepository.TYPE) + .setSettings( + Settings.builder() + .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) + .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) + .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) + .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), true) + ) + ); + + Client client = client(); + 
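+        // both indices are created with zero replicas, so an unassigned primary is enough to turn them red before the snapshot is taken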
Settings indexSettings = getIndexSettings(20, 0).build(); + createIndex(indexName1, indexSettings); + + Settings indexSettings2 = getIndexSettings(15, 0).build(); + createIndex(indexName2, indexSettings2); + + final int numDocsInIndex1 = 10; + final int numDocsInIndex2 = 20; + indexDocuments(client, indexName1, numDocsInIndex1); + indexDocuments(client, indexName2, numDocsInIndex2); + ensureGreen(indexName1, indexName2); + + internalCluster().ensureAtLeastNumDataNodes(0); + ensureRed(indexName1); + ensureRed(indexName2); + CreateSnapshotResponse createSnapshotResponse2 = client().admin() + .cluster() + .prepareCreateSnapshot(snapshotRepoName, snapshotName1) + .get(); + SnapshotInfo snapshotInfo = createSnapshotResponse2.getSnapshotInfo(); + assertThat(snapshotInfo.state(), equalTo(SnapshotState.SUCCESS)); + assertThat(snapshotInfo.successfulShards(), greaterThan(0)); + assertThat(snapshotInfo.successfulShards(), equalTo(snapshotInfo.totalShards())); + assertThat(snapshotInfo.snapshotId().getName(), equalTo(snapshotName1)); + assertThat(snapshotInfo.getPinnedTimestamp(), greaterThan(0L)); + } + + public void testCreateSnapshotV2WithIndexingLoad() throws Exception { + internalCluster().startClusterManagerOnlyNode(pinnedTimestampSettings()); + internalCluster().startDataOnlyNode(pinnedTimestampSettings()); + internalCluster().startDataOnlyNode(pinnedTimestampSettings()); + String indexName1 = "testindex1"; + String indexName2 = "testindex2"; + String snapshotRepoName = "test-create-snapshot-repo"; + String snapshotName1 = "test-create-snapshot1"; + Path absolutePath1 = randomRepoPath().toAbsolutePath(); + logger.info("Snapshot Path [{}]", absolutePath1); + + assertAcked( + client().admin() + .cluster() + .preparePutRepository(snapshotRepoName) + .setType(FsRepository.TYPE) + .setSettings( + Settings.builder() + .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) + .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) + .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) + .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), true) + ) + ); + + Client client = client(); + Settings indexSettings = getIndexSettings(20, 0).build(); + createIndex(indexName1, indexSettings); + + Settings indexSettings2 = getIndexSettings(15, 0).build(); + createIndex(indexName2, indexSettings2); + + final int numDocsInIndex1 = 10; + final int numDocsInIndex2 = 20; + indexDocuments(client, indexName1, numDocsInIndex1); + indexDocuments(client, indexName2, numDocsInIndex2); + ensureGreen(indexName1, indexName2); + + Thread indexingThread = new Thread(() -> { + try { + for (int i = 0; i < 50; i++) { + internalCluster().client().prepareIndex("test-index-load").setSource("field", "value" + i).execute().actionGet(); + } + } catch (Exception e) { + fail("indexing failed due to exception: " + e.getMessage()); + } + }); + + // Start indexing + indexingThread.start(); + + // Wait for a bit to let some documents be indexed + Thread.sleep(1000); + + // Create a snapshot while indexing is ongoing + CreateSnapshotResponse createSnapshotResponse2 = client().admin() + .cluster() + .prepareCreateSnapshot(snapshotRepoName, snapshotName1) + .get(); + + SnapshotInfo snapshotInfo = createSnapshotResponse2.getSnapshotInfo(); + assertThat(snapshotInfo.state(), equalTo(SnapshotState.SUCCESS)); + assertThat(snapshotInfo.successfulShards(), greaterThan(0)); + assertThat(snapshotInfo.successfulShards(), 
equalTo(snapshotInfo.totalShards())); + assertThat(snapshotInfo.snapshotId().getName(), equalTo(snapshotName1)); + assertThat(snapshotInfo.getPinnedTimestamp(), greaterThan(0L)); + assertTrue(snapshotInfo.indices().contains("test-index-load")); + assertTrue(snapshotInfo.indices().contains(indexName1)); + assertTrue(snapshotInfo.indices().contains(indexName2)); + indexingThread.join(); + + } + + public void testCreateSnapshotV2WithShallowCopySettingDisabled() throws Exception { + internalCluster().startClusterManagerOnlyNode(pinnedTimestampSettings()); + internalCluster().startDataOnlyNode(pinnedTimestampSettings()); + internalCluster().startDataOnlyNode(pinnedTimestampSettings()); + String indexName1 = "testindex1"; + String indexName2 = "testindex2"; + String snapshotRepoName = "test-create-snapshot-repo"; + String snapshotName1 = "test-create-snapshot1"; + Path absolutePath1 = randomRepoPath().toAbsolutePath(); + logger.info("Snapshot Path [{}]", absolutePath1); + + assertAcked( + client().admin() + .cluster() + .preparePutRepository(snapshotRepoName) + .setType(FsRepository.TYPE) + .setSettings( + Settings.builder() + .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) + .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) + .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), false) + .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), true) + ) + ); + + Client client = client(); + Settings indexSettings = getIndexSettings(20, 0).build(); + createIndex(indexName1, indexSettings); + + Settings indexSettings2 = getIndexSettings(15, 0).build(); + createIndex(indexName2, indexSettings2); + + final int numDocsInIndex1 = 10; + final int numDocsInIndex2 = 20; + indexDocuments(client, indexName1, numDocsInIndex1); + indexDocuments(client, indexName2, numDocsInIndex2); + ensureGreen(indexName1, indexName2); + + // Will create full copy snapshot if `REMOTE_STORE_INDEX_SHALLOW_COPY` is false but `SHALLOW_SNAPSHOT_V2` is true + SnapshotInfo snapshotInfo = createSnapshot(snapshotRepoName, snapshotName1, Collections.emptyList()); + assertThat(snapshotInfo.state(), equalTo(SnapshotState.SUCCESS)); + assertThat(snapshotInfo.successfulShards(), greaterThan(0)); + assertThat(snapshotInfo.successfulShards(), equalTo(snapshotInfo.totalShards())); + assertThat(snapshotInfo.getPinnedTimestamp(), equalTo(0L)); + + // Validate that snapshot is present in repository data + Repository repository = internalCluster().getInstance(RepositoriesService.class).repository(snapshotRepoName); + PlainActionFuture<RepositoryData> repositoryDataPlainActionFuture = new PlainActionFuture<>(); + repository.getRepositoryData(repositoryDataPlainActionFuture); + + RepositoryData repositoryData = repositoryDataPlainActionFuture.get(); + assertTrue(repositoryData.getSnapshotIds().contains(snapshotInfo.snapshotId())); + } + + public void testClusterManagerFailoverDuringSnapshotCreation() throws Exception { + + internalCluster().startClusterManagerOnlyNodes(3, pinnedTimestampSettings()); + internalCluster().startDataOnlyNode(pinnedTimestampSettings()); + String indexName1 = "testindex1"; + String indexName2 = "testindex2"; + String snapshotRepoName = "test-create-snapshot-repo"; + String snapshotName1 = "test-create-snapshot1"; + Path absolutePath1 = randomRepoPath().toAbsolutePath(); + logger.info("Snapshot Path [{}]", absolutePath1); + + assertAcked( + client().admin() + .cluster() + 
.preparePutRepository(snapshotRepoName) + .setType(FsRepository.TYPE) + .setSettings( + Settings.builder() + .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) + .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) + .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) + .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), true) + ) + ); + + Client client = client(); + Settings indexSettings = getIndexSettings(20, 0).build(); + createIndex(indexName1, indexSettings); + + Settings indexSettings2 = getIndexSettings(15, 0).build(); + createIndex(indexName2, indexSettings2); + + final int numDocsInIndex1 = 10; + final int numDocsInIndex2 = 20; + indexDocuments(client, indexName1, numDocsInIndex1); + indexDocuments(client, indexName2, numDocsInIndex2); + ensureGreen(indexName1, indexName2); + + ensureStableCluster(4, internalCluster().getClusterManagerName()); + + final SnapshotInfo[] snapshotInfo = new SnapshotInfo[1]; + final Boolean[] snapshotFailed = new Boolean[1]; + snapshotFailed[0] = false; + Thread snapshotThread = new Thread(() -> { + try { + // Start snapshot creation + CreateSnapshotResponse createSnapshotResponse = client().admin() + .cluster() + .prepareCreateSnapshot(snapshotRepoName, snapshotName1) + .get(); + snapshotInfo[0] = createSnapshotResponse.getSnapshotInfo(); + + } catch (Exception e) { + snapshotFailed[0] = true; + } + }); + snapshotThread.start(); + Thread.sleep(100); + + internalCluster().stopCurrentClusterManagerNode(); + + // Wait for the cluster to elect a new Cluster Manager and stabilize + ensureStableCluster(3, internalCluster().getClusterManagerName()); + + // Wait for the snapshot thread to complete + snapshotThread.join(); + + // Validate that the snapshot was created or handled gracefully + Repository repository = internalCluster().getInstance(RepositoriesService.class).repository(snapshotRepoName); + PlainActionFuture<RepositoryData> repositoryDataPlainActionFuture = new PlainActionFuture<>(); + repository.getRepositoryData(repositoryDataPlainActionFuture); + + RepositoryData repositoryData = repositoryDataPlainActionFuture.get(); + if (snapshotFailed[0]) { + // The request failed before any snapshot info was returned, so there is no snapshot id to look up in the repository data + assertNull(snapshotInfo[0]); + } else { + assertTrue(repositoryData.getSnapshotIds().contains(snapshotInfo[0].snapshotId())); + } + } + + public void testConcurrentV1SnapshotAndV2RepoSettingUpdate() throws Exception { + internalCluster().startClusterManagerOnlyNode(pinnedTimestampSettings()); + internalCluster().startDataOnlyNode(pinnedTimestampSettings()); + internalCluster().startDataOnlyNode(pinnedTimestampSettings()); + String snapshotRepoName = "test-create-snapshot-repo"; + String snapshotName1 = "test-create-snapshot-v1"; + Path absolutePath1 = randomRepoPath().toAbsolutePath(); + logger.info("Snapshot Path [{}]", absolutePath1); + + assertAcked( + client().admin() + .cluster() + .preparePutRepository(snapshotRepoName) + .setType(FsRepository.TYPE) + .setSettings( + Settings.builder() + .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) + .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) + .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) + .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), false) + ) + ); + Client client = client(); + Settings indexSettings = 
getIndexSettings(20, 0).build(); + + for (int i = 0; i < 10; i++) { + createIndex("index" + i, indexSettings); + } + ensureStableCluster(3); + for (int i = 0; i < 10; i++) { + indexDocuments(client, "index" + i, 15); + } + + ensureStableCluster(3); + for (int i = 0; i < 10; i++) { + ensureGreen("index" + i); + } + final CreateSnapshotResponse[] snapshotV1Response = new CreateSnapshotResponse[1]; + // Create a separate thread to create the first snapshot + Thread createV1SnapshotThread = new Thread(() -> { + try { + snapshotV1Response[0] = client().admin() + .cluster() + .prepareCreateSnapshot(snapshotRepoName, snapshotName1) + .setWaitForCompletion(true) + .get(); + + } catch (Exception e) { + throw new RuntimeException(e); + } + }); + + // Create a separate thread to enable shallow_snapshot_v2 + Thread enableV2Thread = new Thread(() -> { + try { + + assertThrows( + IllegalStateException.class, + () -> client().admin() + .cluster() + .preparePutRepository(snapshotRepoName) + .setType(FsRepository.TYPE) + .setSettings( + Settings.builder() + .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) + .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) + .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) + .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), true) + ) + .get() + ); + + } catch (Exception e) { + throw new RuntimeException(e); + } + }); + + createV1SnapshotThread.start(); + + Thread.sleep(100); + + enableV2Thread.start(); + + enableV2Thread.join(); + createV1SnapshotThread.join(); + } + + private Settings pinnedTimestampSettings() { + Settings settings = Settings.builder() + .put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED.getKey(), true) + .build(); + return settings; + } + } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java index 194dce5f4a57a..a327b683874f6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java @@ -217,10 +217,15 @@ public void testStaleCommitDeletionWithInvokeFlush() throws Exception { } else { // As delete is async its possible that the file gets created before the deletion or after // deletion. - MatcherAssert.assertThat( - actualFileCount, - is(oneOf(lastNMetadataFilesToKeep - 1, lastNMetadataFilesToKeep, lastNMetadataFilesToKeep + 1)) - ); + if (RemoteStoreSettings.isPinnedTimestampsEnabled()) { + // With pinned timestamp, we also keep md files since last successful fetch + assertTrue(actualFileCount >= lastNMetadataFilesToKeep); + } else { + MatcherAssert.assertThat( + actualFileCount, + is(oneOf(lastNMetadataFilesToKeep - 1, lastNMetadataFilesToKeep, lastNMetadataFilesToKeep + 1)) + ); + } } }, 30, TimeUnit.SECONDS); } @@ -249,7 +254,12 @@ public void testStaleCommitDeletionWithMinSegmentFiles_3() throws Exception { Path indexPath = Path.of(segmentRepoPath + "/" + shardPath); int actualFileCount = getFileCount(indexPath); // We also allow (numberOfIterations + 1) as index creation also triggers refresh. 
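Taken together, the CreateSnapshotV2IT tests above pin down a single contract: once a repository enables BlobStoreRepository.SHALLOW_SNAPSHOT_V2, create-snapshot requests complete synchronously and record a pinned timestamp even when wait_for_completion is never requested. A minimal sketch of that contract, reusing the harness and repository name from those tests (the snapshot name is a made-up illustration, not code from this PR):

// Sketch under the assumptions above: repository "test-create-snapshot-repo" was registered
// with SHALLOW_SNAPSHOT_V2 = true, as in the tests; "snapshot-v2-example" is a hypothetical name.
CreateSnapshotResponse response = client().admin()
    .cluster()
    .prepareCreateSnapshot("test-create-snapshot-repo", "snapshot-v2-example")
    .get(); // note: no setWaitForCompletion(true); the v2 path is synchronous regardless
SnapshotInfo info = response.getSnapshotInfo();
assertThat(info.state(), equalTo(SnapshotState.SUCCESS)); // completed, not merely accepted
assertThat(info.getPinnedTimestamp(), greaterThan(0L)); // v2 snapshots pin a timestamp

The RemoteStoreIT hunk that follows adjusts cleanup assertions for the same pinned-timestamp behavior: retained metadata files turn exact file-count checks into lower bounds.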
- MatcherAssert.assertThat(actualFileCount, is(oneOf(4))); + if (RemoteStoreSettings.isPinnedTimestampsEnabled()) { + // With pinned timestamp, we also keep md files since last successful fetch + assertTrue(actualFileCount >= 4); + } else { + assertEquals(4, actualFileCount); + } } public void testStaleCommitDeletionWithMinSegmentFiles_Disabled() throws Exception { diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStorePinnedTimestampsIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStorePinnedTimestampsIT.java index 05ff738d2df0b..cb91c63e17245 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStorePinnedTimestampsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStorePinnedTimestampsIT.java @@ -9,8 +9,10 @@ package org.opensearch.remotestore; import org.opensearch.common.collect.Tuple; +import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.action.ActionListener; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.node.remotestore.RemoteStorePinnedTimestampService; import org.opensearch.test.OpenSearchIntegTestCase; @@ -20,6 +22,14 @@ public class RemoteStorePinnedTimestampsIT extends RemoteStoreBaseIntegTestCase { static final String INDEX_NAME = "remote-store-test-idx-1"; + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED.getKey(), true) + .build(); + } + ActionListener<Void> noOpActionListener = new ActionListener<>() { @Override public void onResponse(Void unused) {} diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java index e688a4491b1a7..2331d52c3a1bc 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java @@ -19,6 +19,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.index.store.RemoteBufferedOutputDirectory; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.blobstore.BlobStoreRepository; @@ -287,8 +288,14 @@ public void testDeleteMultipleShallowCopySnapshotsCase3() throws Exception { public void testRemoteStoreCleanupForDeletedIndex() throws Exception { disableRepoConsistencyCheck("Remote store repository is being used in the test"); final Path remoteStoreRepoPath = randomRepoPath(); - internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); - internalCluster().startDataOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); + Settings settings = remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath); + // Disabling pinned timestamp as this test is specifically for shallow snapshot. 
+ settings = Settings.builder() + .put(settings) + .put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED.getKey(), false) + .build(); + internalCluster().startClusterManagerOnlyNode(settings); + internalCluster().startDataOnlyNode(settings); final Client clusterManagerClient = internalCluster().clusterManagerClient(); ensureStableCluster(2); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoryFilterUserMetadataIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoryFilterUserMetadataIT.java index 0eb37703eb0f1..0bebe969b3f3e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoryFilterUserMetadataIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoryFilterUserMetadataIT.java @@ -36,6 +36,7 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.Priority; import org.opensearch.common.settings.Settings; import org.opensearch.core.action.ActionListener; import org.opensearch.core.xcontent.NamedXContentRegistry; @@ -127,6 +128,7 @@ public void finalizeSnapshot( SnapshotInfo snapshotInfo, Version repositoryMetaVersion, Function<ClusterState, ClusterState> stateTransformer, + Priority repositoryUpdatePriority, ActionListener<RepositoryData> listener ) { super.finalizeSnapshot( @@ -136,6 +138,7 @@ public void finalizeSnapshot( snapshotInfo, repositoryMetaVersion, stateTransformer, + repositoryUpdatePriority, listener ); } diff --git a/server/src/main/java/org/apache/lucene/codecs/lucene90/Lucene90DocValuesConsumerWrapper.java b/server/src/main/java/org/apache/lucene/codecs/lucene90/Lucene90DocValuesConsumerWrapper.java new file mode 100644 index 0000000000000..67ee45f4c9306 --- /dev/null +++ b/server/src/main/java/org/apache/lucene/codecs/lucene90/Lucene90DocValuesConsumerWrapper.java @@ -0,0 +1,46 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.apache.lucene.codecs.lucene90; + +import org.apache.lucene.codecs.DocValuesConsumer; +import org.apache.lucene.index.SegmentWriteState; + +import java.io.Closeable; +import java.io.IOException; + +/** + * This class is an abstraction of the {@link DocValuesConsumer} for the Star Tree index structure. + * It is responsible for consuming various types of document values (numeric, binary, sorted, sorted numeric, + * and sorted set) for fields in the Star Tree index. 
+ * + * @opensearch.experimental + */ +public class Lucene90DocValuesConsumerWrapper implements Closeable { + + private final Lucene90DocValuesConsumer lucene90DocValuesConsumer; + + public Lucene90DocValuesConsumerWrapper( + SegmentWriteState state, + String dataCodec, + String dataExtension, + String metaCodec, + String metaExtension + ) throws IOException { + lucene90DocValuesConsumer = new Lucene90DocValuesConsumer(state, dataCodec, dataExtension, metaCodec, metaExtension); + } + + public Lucene90DocValuesConsumer getLucene90DocValuesConsumer() { + return lucene90DocValuesConsumer; + } + + @Override + public void close() throws IOException { + lucene90DocValuesConsumer.close(); + } +} diff --git a/server/src/main/java/org/apache/lucene/codecs/lucene90/Lucene90DocValuesProducerWrapper.java b/server/src/main/java/org/apache/lucene/codecs/lucene90/Lucene90DocValuesProducerWrapper.java new file mode 100644 index 0000000000000..a213852c59094 --- /dev/null +++ b/server/src/main/java/org/apache/lucene/codecs/lucene90/Lucene90DocValuesProducerWrapper.java @@ -0,0 +1,46 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.apache.lucene.codecs.lucene90; + +import org.apache.lucene.codecs.DocValuesProducer; +import org.apache.lucene.index.SegmentReadState; + +import java.io.Closeable; +import java.io.IOException; + +/** + * This class is a custom abstraction of the {@link DocValuesProducer} for the Star Tree index structure. + * It is responsible for providing access to various types of document values (numeric, binary, sorted, sorted numeric, + * and sorted set) for fields in the Star Tree index. + * + * @opensearch.experimental + */ +public class Lucene90DocValuesProducerWrapper implements Closeable { + + private final Lucene90DocValuesProducer lucene90DocValuesProducer; + + public Lucene90DocValuesProducerWrapper( + SegmentReadState state, + String dataCodec, + String dataExtension, + String metaCodec, + String metaExtension + ) throws IOException { + lucene90DocValuesProducer = new Lucene90DocValuesProducer(state, dataCodec, dataExtension, metaCodec, metaExtension); + } + + public DocValuesProducer getLucene90DocValuesProducer() { + return lucene90DocValuesProducer; + } + + @Override + public void close() throws IOException { + lucene90DocValuesProducer.close(); + } +} diff --git a/server/src/main/java/org/apache/lucene/index/SortedNumericDocValuesWriterWrapper.java b/server/src/main/java/org/apache/lucene/index/SortedNumericDocValuesWriterWrapper.java new file mode 100644 index 0000000000000..f7759fcced284 --- /dev/null +++ b/server/src/main/java/org/apache/lucene/index/SortedNumericDocValuesWriterWrapper.java @@ -0,0 +1,53 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.apache.lucene.index; + +import org.apache.lucene.util.Counter; + +/** + * A wrapper class for writing sorted numeric doc values. + *
<p>
+ * This class provides a convenient way to add sorted numeric doc values to a field + * and retrieve the corresponding {@link SortedNumericDocValues} instance. + * + * @opensearch.experimental + */ +public class SortedNumericDocValuesWriterWrapper { + + private final SortedNumericDocValuesWriter sortedNumericDocValuesWriter; + + /** + * Sole constructor. Constructs a new {@link SortedNumericDocValuesWriterWrapper} instance. + * + * @param fieldInfo the field information for the field being written + * @param counter a counter for tracking memory usage + */ + public SortedNumericDocValuesWriterWrapper(FieldInfo fieldInfo, Counter counter) { + sortedNumericDocValuesWriter = new SortedNumericDocValuesWriter(fieldInfo, counter); + } + + /** + * Adds a value to the sorted numeric doc values for the specified document. + * + * @param docID the document ID + * @param value the value to add + */ + public void addValue(int docID, long value) { + sortedNumericDocValuesWriter.addValue(docID, value); + } + + /** + * Returns the {@link SortedNumericDocValues} instance containing the sorted numeric doc values + * + * @return the {@link SortedNumericDocValues} instance + */ + public SortedNumericDocValues getDocValues() { + return sortedNumericDocValuesWriter.getDocValues(); + } +} diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java b/server/src/main/java/org/opensearch/action/ActionModule.java index 574b7029a6501..c86e6580122d5 100644 --- a/server/src/main/java/org/opensearch/action/ActionModule.java +++ b/server/src/main/java/org/opensearch/action/ActionModule.java @@ -1200,9 +1200,12 @@ public void unregisterDynamicRoute(NamedRoute route) { * @param route The {@link RestHandler.Route}. * @return the corresponding {@link RestSendToExtensionAction} if it is registered, null otherwise. 
*/ - @SuppressWarnings("unchecked") public RestSendToExtensionAction get(RestHandler.Route route) { - return routeRegistry.get(route); + if (route instanceof NamedRoute) { + return routeRegistry.get((NamedRoute) route); + } + // Only NamedRoutes are map keys so any other route is not in the map + return null; } } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java index bb3bf014f213b..25e71d5598a98 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java @@ -42,12 +42,16 @@ import org.opensearch.common.inject.Inject; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.Repository; import org.opensearch.snapshots.SnapshotsService; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; import java.io.IOException; +import static org.opensearch.repositories.blobstore.BlobStoreRepository.SHALLOW_SNAPSHOT_V2; + /** * Transport action for create snapshot operation * @@ -56,12 +60,15 @@ public class TransportCreateSnapshotAction extends TransportClusterManagerNodeAction<CreateSnapshotRequest, CreateSnapshotResponse> { private final SnapshotsService snapshotsService; + private final RepositoriesService repositoriesService; + @Inject public TransportCreateSnapshotAction( TransportService transportService, ClusterService clusterService, ThreadPool threadPool, SnapshotsService snapshotsService, + RepositoriesService repositoriesService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver ) { @@ -75,6 +82,7 @@ public TransportCreateSnapshotAction( indexNameExpressionResolver ); this.snapshotsService = snapshotsService; + this.repositoriesService = repositoriesService; } @Override @@ -103,7 +111,9 @@ protected void clusterManagerOperation( ClusterState state, final ActionListener<CreateSnapshotResponse> listener ) { - if (request.waitForCompletion()) { + Repository repository = repositoriesService.repository(request.repository()); + boolean isSnapshotV2 = SHALLOW_SNAPSHOT_V2.get(repository.getMetadata().settings()); + if (request.waitForCompletion() || isSnapshotV2) { snapshotsService.executeSnapshot(request, ActionListener.map(listener, CreateSnapshotResponse::new)); } else { snapshotsService.createSnapshot(request, ActionListener.map(listener, snapshot -> new CreateSnapshotResponse())); diff --git a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java index 7fa63ae8abc62..c7820c2c9a365 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java @@ -105,7 +105,8 @@ public CoordinationState( .getLastAcceptedConfiguration(); this.publishVotes = new VoteCollection(); this.isRemoteStateEnabled = isRemoteStoreClusterStateEnabled(settings); - this.isRemotePublicationEnabled = FeatureFlags.isEnabled(REMOTE_PUBLICATION_EXPERIMENTAL) + this.isRemotePublicationEnabled = isRemoteStateEnabled + && FeatureFlags.isEnabled(REMOTE_PUBLICATION_EXPERIMENTAL) && 
localNode.isRemoteStatePublicationEnabled(); } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java index 4da6c68b40733..6163fd624c838 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java @@ -1397,7 +1397,14 @@ public Builder put(final QueryGroup queryGroup) { return queryGroups(existing); } - public Map<String, QueryGroup> getQueryGroups() { + public Builder remove(final QueryGroup queryGroup) { + Objects.requireNonNull(queryGroup, "queryGroup should not be null"); + Map<String, QueryGroup> existing = new HashMap<>(getQueryGroups()); + existing.remove(queryGroup.get_id()); + return queryGroups(existing); + } + + private Map<String, QueryGroup> getQueryGroups() { return Optional.ofNullable(this.customs.get(QueryGroupMetadata.TYPE)) .map(o -> (QueryGroupMetadata) o) .map(QueryGroupMetadata::queryGroups) @@ -1830,9 +1837,7 @@ static void validateDataStreams(SortedMap<String, IndexAbstraction> indicesLookup if (dsMetadata != null) { for (DataStream ds : dsMetadata.dataStreams().values()) { String prefix = DataStream.BACKING_INDEX_PREFIX + ds.getName() + "-"; - Set<String> conflicts = indicesLookup.subMap(prefix, DataStream.BACKING_INDEX_PREFIX + ds.getName() + ".") // '.' is the - // char after - // '-' + Set<String> conflicts = indicesLookup.subMap(prefix, DataStream.BACKING_INDEX_PREFIX + ds.getName() + ".") .keySet() .stream() .filter(s -> NUMBER_PATTERN.matcher(s.substring(prefix.length())).matches()) diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 49ef87838ed2e..8daf9125bb27e 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -761,6 +761,7 @@ public void apply(Settings value, Settings current, Settings previous) { RemoteStoreSettings.CLUSTER_REMOTE_STORE_TRANSLOG_METADATA, RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_SCHEDULER_INTERVAL, RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_LOOKBACK_INTERVAL, + RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED, SearchService.CLUSTER_ALLOW_DERIVED_FIELD_SETTING, diff --git a/server/src/main/java/org/opensearch/common/xcontent/support/XContentHttpChunk.java b/server/src/main/java/org/opensearch/common/xcontent/support/XContentHttpChunk.java index 15b63a0ac2030..a7f1d30cd05dd 100644 --- a/server/src/main/java/org/opensearch/common/xcontent/support/XContentHttpChunk.java +++ b/server/src/main/java/org/opensearch/common/xcontent/support/XContentHttpChunk.java @@ -12,6 +12,7 @@ import org.opensearch.common.lease.Releasable; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.bytes.CompositeBytesReference; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.http.HttpChunk; @@ -19,6 +20,7 @@ * Wraps the instance of the {@link XContentBuilder} into {@link HttpChunk} */ public final class XContentHttpChunk implements HttpChunk { + private static final byte[] CHUNK_SEPARATOR = new byte[] { '\r', '\n' }; private final BytesReference content; /** @@ -42,7 +44,8 @@ private XContentHttpChunk(@Nullable final XContentBuilder builder) { if (builder == null /* no content */) { content = BytesArray.EMPTY; } else { - content = BytesReference.bytes(builder); + // Always finalize the output 
chunk with '\r\n' sequence + content = CompositeBytesReference.of(BytesReference.bytes(builder), new BytesArray(CHUNK_SEPARATOR)); } } diff --git a/server/src/main/java/org/opensearch/index/codec/composite/CompositeCodecFactory.java b/server/src/main/java/org/opensearch/index/codec/composite/CompositeCodecFactory.java index 3acedc6a27d7f..99691d7061ac9 100644 --- a/server/src/main/java/org/opensearch/index/codec/composite/CompositeCodecFactory.java +++ b/server/src/main/java/org/opensearch/index/codec/composite/CompositeCodecFactory.java @@ -12,6 +12,7 @@ import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.lucene99.Lucene99Codec; import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.index.codec.composite.composite99.Composite99Codec; import org.opensearch.index.mapper.MapperService; import java.util.HashMap; @@ -29,6 +30,10 @@ */ @ExperimentalApi public class CompositeCodecFactory { + + // we can use this to track the latest composite codec + public static final String COMPOSITE_CODEC = Composite99Codec.COMPOSITE_INDEX_CODEC_NAME; + public CompositeCodecFactory() {} public Map<String, Codec> getCompositeIndexCodecs(MapperService mapperService, Logger logger) { diff --git a/server/src/main/java/org/opensearch/index/codec/composite/LuceneDocValuesConsumerFactory.java b/server/src/main/java/org/opensearch/index/codec/composite/LuceneDocValuesConsumerFactory.java new file mode 100644 index 0000000000000..1ed672870337e --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/composite/LuceneDocValuesConsumerFactory.java @@ -0,0 +1,50 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec.composite; + +import org.apache.lucene.codecs.DocValuesConsumer; +import org.apache.lucene.codecs.lucene90.Lucene90DocValuesConsumerWrapper; +import org.apache.lucene.index.SegmentWriteState; + +import java.io.IOException; + +/** + * A factory class that provides a factory method for creating {@link DocValuesConsumer} instances + * for the latest composite codec. + *
<p>
+ * The segments are written using the latest composite codec. The codec + * internally manages calling the appropriate consumer factory for its abstractions. + *
<p>
+ * This design ensures forward compatibility for writing operations. + * + * @opensearch.experimental + */ +public class LuceneDocValuesConsumerFactory { + + public static DocValuesConsumer getDocValuesConsumerForCompositeCodec( + SegmentWriteState state, + String dataCodec, + String dataExtension, + String metaCodec, + String metaExtension + ) throws IOException { + // The returned consumer must stay open: closing the wrapper here would close the consumer before the caller writes through it, so close() is left to the caller + Lucene90DocValuesConsumerWrapper lucene90DocValuesConsumerWrapper = new Lucene90DocValuesConsumerWrapper( + state, + dataCodec, + dataExtension, + metaCodec, + metaExtension + ); + return lucene90DocValuesConsumerWrapper.getLucene90DocValuesConsumer(); + } + +} diff --git a/server/src/main/java/org/opensearch/index/codec/composite/LuceneDocValuesProducerFactory.java b/server/src/main/java/org/opensearch/index/codec/composite/LuceneDocValuesProducerFactory.java new file mode 100644 index 0000000000000..611a97ffeb834 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/composite/LuceneDocValuesProducerFactory.java @@ -0,0 +1,60 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec.composite; + +import org.apache.lucene.codecs.DocValuesProducer; +import org.apache.lucene.codecs.lucene90.Lucene90DocValuesProducerWrapper; +import org.apache.lucene.index.SegmentReadState; +import org.opensearch.index.codec.composite.composite99.Composite99Codec; + +import java.io.IOException; + +/** + * A factory class that provides a factory method for creating {@link DocValuesProducer} instances + * based on the specified composite codec. + *
<p>
+ * In producers, we want to ensure compatibility with older codec versions during the segment reads. + * This approach allows for writing with only the latest codec while maintaining + * the ability to read data encoded with any codec version present in the segment. + *
<p>
+ * This design ensures backward compatibility for reads across different codec versions. + * + * @opensearch.experimental + */ +public class LuceneDocValuesProducerFactory { + + public static DocValuesProducer getDocValuesProducerForCompositeCodec( + String compositeCodec, + SegmentReadState state, + String dataCodec, + String dataExtension, + String metaCodec, + String metaExtension + ) throws IOException { + + switch (compositeCodec) { + case Composite99Codec.COMPOSITE_INDEX_CODEC_NAME: + // As with the consumer factory, the returned producer must stay open; close() is left to the caller + Lucene90DocValuesProducerWrapper lucene90DocValuesProducerWrapper = new Lucene90DocValuesProducerWrapper( + state, + dataCodec, + dataExtension, + metaCodec, + metaExtension + ); + return lucene90DocValuesProducerWrapper.getLucene90DocValuesProducer(); + default: + throw new IllegalStateException("Invalid composite codec [" + compositeCodec + "]"); + } + + } + +} diff --git a/server/src/main/java/org/opensearch/index/codec/composite/Composite99Codec.java b/server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99Codec.java similarity index 97% rename from server/src/main/java/org/opensearch/index/codec/composite/Composite99Codec.java rename to server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99Codec.java index de04944e67cd2..8422932e937c2 100644 --- a/server/src/main/java/org/opensearch/index/codec/composite/Composite99Codec.java +++ b/server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99Codec.java @@ -6,7 +6,7 @@ * compatible open source license. */ -package org.opensearch.index.codec.composite; +package org.opensearch.index.codec.composite.composite99; import org.apache.logging.log4j.Logger; import org.apache.lucene.codecs.Codec; diff --git a/server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesFormat.java b/server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99DocValuesFormat.java similarity index 97% rename from server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesFormat.java rename to server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99DocValuesFormat.java index 216ed4f68f333..e8c69b11b7c88 100644 --- a/server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesFormat.java +++ b/server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99DocValuesFormat.java @@ -6,7 +6,7 @@ * compatible open source license. */ -package org.opensearch.index.codec.composite; +package org.opensearch.index.codec.composite.composite99; import org.apache.lucene.codecs.DocValuesConsumer; import org.apache.lucene.codecs.DocValuesFormat; diff --git a/server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesReader.java b/server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99DocValuesReader.java similarity index 91% rename from server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesReader.java rename to server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99DocValuesReader.java index df5008a7f294e..e3bfe01cfa2d5 100644 --- a/server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesReader.java +++ b/server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99DocValuesReader.java @@ -6,7 +6,7 @@ * compatible open source license. 
*/ -package org.opensearch.index.codec.composite; +package org.opensearch.index.codec.composite.composite99; import org.apache.lucene.codecs.DocValuesProducer; import org.apache.lucene.index.BinaryDocValues; @@ -17,6 +17,9 @@ import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.index.SortedSetDocValues; import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; +import org.opensearch.index.codec.composite.CompositeIndexReader; +import org.opensearch.index.codec.composite.CompositeIndexValues; import java.io.IOException; import java.util.ArrayList; diff --git a/server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesWriter.java b/server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99DocValuesWriter.java similarity index 82% rename from server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesWriter.java rename to server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99DocValuesWriter.java index 6ed1a8c42e380..74ab7d423998e 100644 --- a/server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesWriter.java +++ b/server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99DocValuesWriter.java @@ -6,7 +6,7 @@ * compatible open source license. */ -package org.opensearch.index.codec.composite; +package org.opensearch.index.codec.composite.composite99; import org.apache.lucene.codecs.DocValuesConsumer; import org.apache.lucene.codecs.DocValuesProducer; @@ -15,13 +15,18 @@ import org.apache.lucene.index.EmptyDocValuesProducer; import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.MergeState; +import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.index.SegmentWriteState; import org.apache.lucene.index.SortedNumericDocValues; import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; +import org.opensearch.index.codec.composite.CompositeIndexReader; +import org.opensearch.index.codec.composite.CompositeIndexValues; import org.opensearch.index.codec.composite.datacube.startree.StarTreeValues; import org.opensearch.index.compositeindex.datacube.startree.StarTreeField; import org.opensearch.index.compositeindex.datacube.startree.builder.StarTreesBuilder; import org.opensearch.index.mapper.CompositeMappedFieldType; +import org.opensearch.index.mapper.DocCountFieldMapper; import org.opensearch.index.mapper.MapperService; import java.io.IOException; @@ -60,21 +65,29 @@ public Composite99DocValuesWriter(DocValuesConsumer delegate, SegmentWriteState segmentWriteState, MapperService mapperService) { this.compositeMappedFieldTypes = mapperService.getCompositeFieldTypes(); compositeFieldSet = new HashSet<>(); segmentFieldSet = new HashSet<>(); + // TODO: add integ test for this for (FieldInfo fi : segmentWriteState.fieldInfos) { if (DocValuesType.SORTED_NUMERIC.equals(fi.getDocValuesType())) { segmentFieldSet.add(fi.name); + } else if (fi.name.equals(DocCountFieldMapper.NAME)) { + segmentFieldSet.add(fi.name); } } for (CompositeMappedFieldType type : compositeMappedFieldTypes) { compositeFieldSet.addAll(type.fields()); } // check if there are any composite fields which are part of the segment + // TODO: add integ test where there are no composite fields in a segment, test both flush and merge cases segmentHasCompositeFields = Collections.disjoint(segmentFieldSet, compositeFieldSet) == false; } @Override public void 
addNumericField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException { delegate.addNumericField(field, valuesProducer); + // Perform this only during flush flow + if (mergeState.get() == null && segmentHasCompositeFields) { + createCompositeIndicesIfPossible(valuesProducer, field); + } } @Override @@ -116,13 +129,7 @@ private void createCompositeIndicesIfPossible(DocValuesProducer valuesProducer, if (segmentFieldSet.isEmpty()) { Set<String> compositeFieldSetCopy = new HashSet<>(compositeFieldSet); for (String compositeField : compositeFieldSetCopy) { - fieldProducerMap.put(compositeField, new EmptyDocValuesProducer() { - @Override - public SortedNumericDocValues getSortedNumeric(FieldInfo field) { - return DocValues.emptySortedNumeric(); - } - }); - compositeFieldSet.remove(compositeField); + addDocValuesForEmptyField(compositeField); } } // we have all the required fields to build composite fields @@ -135,7 +142,28 @@ public SortedNumericDocValues getSortedNumeric(FieldInfo field) { } } } + } + /** + * Add empty doc values for fields not present in segment + */ + private void addDocValuesForEmptyField(String compositeField) { + if (compositeField.equals(DocCountFieldMapper.NAME)) { + fieldProducerMap.put(compositeField, new EmptyDocValuesProducer() { + @Override + public NumericDocValues getNumeric(FieldInfo field) { + return DocValues.emptyNumeric(); + } + }); + } else { + fieldProducerMap.put(compositeField, new EmptyDocValuesProducer() { + @Override + public SortedNumericDocValues getSortedNumeric(FieldInfo field) { + return DocValues.emptySortedNumeric(); + } + }); + } + compositeFieldSet.remove(compositeField); } @Override diff --git a/server/src/main/java/org/opensearch/index/codec/composite/composite99/package-info.java b/server/src/main/java/org/opensearch/index/codec/composite/composite99/package-info.java new file mode 100644 index 0000000000000..3d6f130b9a7c8 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/composite/composite99/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Responsible for handling all composite index codecs and operations associated with Composite99 codec + */ +package org.opensearch.index.codec.composite.composite99; diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/MetricStat.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/MetricStat.java index df3b2229d2c5b..1522078024b64 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/MetricStat.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/MetricStat.java @@ -10,6 +10,9 @@ import org.opensearch.common.annotation.ExperimentalApi; +import java.util.Arrays; +import java.util.List; + /** * Supported metric types for composite index * @@ -18,24 +21,56 @@ @ExperimentalApi public enum MetricStat { VALUE_COUNT("value_count"), - AVG("avg"), SUM("sum"), MIN("min"), - MAX("max"); + MAX("max"), + AVG("avg", VALUE_COUNT, SUM), + DOC_COUNT("doc_count", true); private final String typeName; + private final MetricStat[] baseMetrics; + + // System field stats cannot be used as input for user metric types + private final boolean isSystemFieldStat; MetricStat(String typeName) { + this(typeName, false); + } + + MetricStat(String typeName, MetricStat... 
baseMetrics) { + this(typeName, false, baseMetrics); + } + + MetricStat(String typeName, boolean isSystemFieldStat, MetricStat... baseMetrics) { this.typeName = typeName; + this.isSystemFieldStat = isSystemFieldStat; + this.baseMetrics = baseMetrics; } public String getTypeName() { return typeName; } + /** + * Return the list of metrics that this metric is derived from. + * For example, AVG is derived from VALUE_COUNT and SUM. + */ + public List<MetricStat> getBaseMetrics() { + return Arrays.asList(baseMetrics); + } + + /** + * Return true if this metric is derived from other metrics. + * For example, AVG is derived from VALUE_COUNT and SUM. + */ + public boolean isDerivedMetric() { + return baseMetrics != null && baseMetrics.length > 0; + } + public static MetricStat fromTypeName(String typeName) { for (MetricStat metric : MetricStat.values()) { - if (metric.getTypeName().equalsIgnoreCase(typeName)) { + // prevent system field stats from being entered as user input + if (metric.getTypeName().equalsIgnoreCase(typeName) && metric.isSystemFieldStat == false) { return metric; } } diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeIndexSettings.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeIndexSettings.java index 6535f8ed11da3..ce389a99b3626 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeIndexSettings.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeIndexSettings.java @@ -15,6 +15,7 @@ import java.util.Arrays; import java.util.List; +import java.util.function.Function; /** * Index settings for star tree fields. The settings are final as right now @@ -93,16 +94,10 @@ public class StarTreeIndexSettings { /** * Default metrics for metric fields as part of star tree fields */ - public static final Setting<List<MetricStat>> DEFAULT_METRICS_LIST = Setting.listSetting( + public static final Setting<List<String>> DEFAULT_METRICS_LIST = Setting.listSetting( "index.composite_index.star_tree.field.default.metrics", - Arrays.asList( - MetricStat.AVG.toString(), - MetricStat.VALUE_COUNT.toString(), - MetricStat.SUM.toString(), - MetricStat.MAX.toString(), - MetricStat.MIN.toString() - ), - MetricStat::fromTypeName, + Arrays.asList(MetricStat.VALUE_COUNT.toString(), MetricStat.SUM.toString()), + Function.identity(), Setting.Property.IndexScope, Setting.Property.Final ); diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeValidator.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeValidator.java index cbed46604681d..203bca3f1c292 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeValidator.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeValidator.java @@ -14,6 +14,7 @@ import org.opensearch.index.compositeindex.datacube.Dimension; import org.opensearch.index.compositeindex.datacube.Metric; import org.opensearch.index.mapper.CompositeMappedFieldType; +import org.opensearch.index.mapper.DocCountFieldMapper; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.StarTreeMapper; @@ -78,7 +79,7 @@ public static void validate(MapperService mapperService, CompositeIndexSettings String.format(Locale.ROOT, "unknown metric field [%s] as part of star tree field", metric.getField()) ); } - if (ft.isAggregatable() == false) { + if (ft.isAggregatable() == false
&& ft instanceof DocCountFieldMapper.DocCountFieldType == false) { throw new IllegalArgumentException( String.format( Locale.ROOT, diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/CountValueAggregator.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/CountValueAggregator.java index 81807cd174a10..e79abe0f170b3 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/CountValueAggregator.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/CountValueAggregator.java @@ -17,12 +17,9 @@ class CountValueAggregator implements ValueAggregator<Long> { public static final long DEFAULT_INITIAL_VALUE = 1L; - private final StarTreeNumericType starTreeNumericType; private static final StarTreeNumericType VALUE_AGGREGATOR_TYPE = StarTreeNumericType.LONG; - public CountValueAggregator(StarTreeNumericType starTreeNumericType) { - this.starTreeNumericType = starTreeNumericType; - } + public CountValueAggregator() {} @Override public StarTreeNumericType getAggregatedValueType() { diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/DocCountAggregator.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/DocCountAggregator.java new file mode 100644 index 0000000000000..0896fa54e9f46 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/DocCountAggregator.java @@ -0,0 +1,70 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.compositeindex.datacube.startree.aggregators; + +import org.opensearch.index.compositeindex.datacube.startree.aggregators.numerictype.StarTreeNumericType; + +/** + * Aggregator to handle '_doc_count' field + * + * @opensearch.experimental + */ +public class DocCountAggregator implements ValueAggregator<Long> { + + private static final StarTreeNumericType VALUE_AGGREGATOR_TYPE = StarTreeNumericType.LONG; + + public DocCountAggregator() {} + + @Override + public StarTreeNumericType getAggregatedValueType() { + return VALUE_AGGREGATOR_TYPE; + } + + /** + * If the _doc_count field is missing for a doc, we count it as '1'; + * otherwise we take the actual value present in the field + */ + @Override + public Long getInitialAggregatedValueForSegmentDocValue(Long segmentDocValue) { + if (segmentDocValue == null) { + return getIdentityMetricValue(); + } + return segmentDocValue; + } + + @Override + public Long mergeAggregatedValueAndSegmentValue(Long value, Long segmentDocValue) { + assert value != null; + return mergeAggregatedValues(value, segmentDocValue); + } + + @Override + public Long mergeAggregatedValues(Long value, Long aggregatedValue) { + if (value == null) { + value = getIdentityMetricValue(); + } + if (aggregatedValue == null) { + aggregatedValue = getIdentityMetricValue(); + } + return value + aggregatedValue; + } + + @Override + public Long toAggregatedValueType(Long rawValue) { + return rawValue; + } + + /** + * If the _doc_count field is missing for a doc, it contributes '1' for the associated doc + */ + @Override + public Long getIdentityMetricValue() { + return 1L; + } +} diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/ValueAggregatorFactory.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/ValueAggregatorFactory.java index ef5b773d81d27..bdc381110365d 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/ValueAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/ValueAggregatorFactory.java @@ -31,11 +31,13 @@ public static ValueAggregator getValueAggregator(MetricStat aggregationType, StarTreeNumericType starTreeNumericType) { case SUM: return new SumValueAggregator(starTreeNumericType); case VALUE_COUNT: - return new CountValueAggregator(starTreeNumericType); + return new CountValueAggregator(); case MIN: return new MinValueAggregator(starTreeNumericType); case MAX: return new MaxValueAggregator(starTreeNumericType); + case DOC_COUNT: + return new DocCountAggregator(); default: throw new IllegalStateException("Unsupported aggregation type: " + aggregationType); } diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilder.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilder.java index 90b2d0727d572..ddcf02cc6291a 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilder.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilder.java @@ -10,6 +10,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.codecs.DocValuesProducer; +import org.apache.lucene.index.DocValues; import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.FieldInfo; import
org.apache.lucene.index.IndexOptions; @@ -28,6 +29,7 @@ import org.opensearch.index.compositeindex.datacube.startree.utils.SequentialDocValuesIterator; import org.opensearch.index.compositeindex.datacube.startree.utils.TreeNode; import org.opensearch.index.fielddata.IndexNumericFieldData; +import org.opensearch.index.mapper.DocCountFieldMapper; import org.opensearch.index.mapper.Mapper; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.NumberFieldMapper; @@ -117,7 +119,20 @@ protected BaseStarTreeBuilder(StarTreeField starTreeField, SegmentWriteState sta public List<MetricAggregatorInfo> generateMetricAggregatorInfos(MapperService mapperService) { List<MetricAggregatorInfo> metricAggregatorInfos = new ArrayList<>(); for (Metric metric : this.starTreeField.getMetrics()) { + if (metric.getField().equals(DocCountFieldMapper.NAME)) { + MetricAggregatorInfo metricAggregatorInfo = new MetricAggregatorInfo( + MetricStat.DOC_COUNT, + metric.getField(), + starTreeField.getName(), + IndexNumericFieldData.NumericType.LONG + ); + metricAggregatorInfos.add(metricAggregatorInfo); + continue; + } for (MetricStat metricStat : metric.getMetrics()) { + if (metricStat.isDerivedMetric()) { + continue; + } IndexNumericFieldData.NumericType numericType; Mapper fieldMapper = mapperService.documentMapper().mappers().getMapper(metric.getField()); if (fieldMapper instanceof NumberFieldMapper) { @@ -426,7 +441,7 @@ public void build(Map<String, DocValuesProducer> fieldProducerMap) throws IOException { String dimension = dimensionsSplitOrder.get(i).getField(); FieldInfo dimensionFieldInfo = state.fieldInfos.fieldInfo(dimension); if (dimensionFieldInfo == null) { - dimensionFieldInfo = getFieldInfo(dimension); + dimensionFieldInfo = getFieldInfo(dimension, DocValuesType.SORTED_NUMERIC); } dimensionReaders[i] = new SequentialDocValuesIterator( fieldProducerMap.get(dimensionFieldInfo.name).getSortedNumeric(dimensionFieldInfo) @@ -438,15 +453,15 @@ public void build(Map<String, DocValuesProducer> fieldProducerMap) throws IOException { logger.debug("Finished Building star-tree in ms : {}", (System.currentTimeMillis() - startTime)); } - private static FieldInfo getFieldInfo(String field) { + private static FieldInfo getFieldInfo(String field, DocValuesType docValuesType) { return new FieldInfo( field, - 1, + 1, // This is filled as part of doc values creation and is not used otherwise false, false, false, IndexOptions.NONE, - DocValuesType.SORTED_NUMERIC, + docValuesType, -1, Collections.emptyMap(), 0, @@ -470,20 +485,44 @@ public List<SequentialDocValuesIterator> getMetricReaders(SegmentWriteState state, Map<String, DocValuesProducer> fieldProducerMap) throws IOException { List<SequentialDocValuesIterator> metricReaders = new ArrayList<>(); for (Metric metric : this.starTreeField.getMetrics()) { for (MetricStat metricStat : metric.getMetrics()) { + SequentialDocValuesIterator metricReader = null; FieldInfo metricFieldInfo = state.fieldInfos.fieldInfo(metric.getField()); - if (metricFieldInfo == null) { - metricFieldInfo = getFieldInfo(metric.getField()); + if (metricStat.equals(MetricStat.DOC_COUNT)) { + // _doc_count is a numeric field, so we convert it to sortedNumericDocValues and get an iterator + metricReader = getIteratorForNumericField(fieldProducerMap, metricFieldInfo, DocCountFieldMapper.NAME); + } else { + if (metricFieldInfo == null) { + metricFieldInfo = getFieldInfo(metric.getField(), DocValuesType.SORTED_NUMERIC); + } + metricReader = new SequentialDocValuesIterator( + fieldProducerMap.get(metricFieldInfo.name).getSortedNumeric(metricFieldInfo) + ); } - - SequentialDocValuesIterator metricReader = new SequentialDocValuesIterator( - fieldProducerMap.get(metricFieldInfo.name).getSortedNumeric(metricFieldInfo) - ); 
metricReaders.add(metricReader); } } return metricReaders; } + /** + * Converts numericDocValues to sortedNumericDocValues and returns SequentialDocValuesIterator + */ + private SequentialDocValuesIterator getIteratorForNumericField( + Map<String, DocValuesProducer> fieldProducerMap, + FieldInfo fieldInfo, + String name + ) throws IOException { + if (fieldInfo == null) { + fieldInfo = getFieldInfo(name, DocValuesType.NUMERIC); + } + SequentialDocValuesIterator sequentialDocValuesIterator; + assert fieldProducerMap.containsKey(fieldInfo.name); + sequentialDocValuesIterator = new SequentialDocValuesIterator( + DocValues.singleton(fieldProducerMap.get(fieldInfo.name).getNumeric(fieldInfo)) + ); + return sequentialDocValuesIterator; + } + /** * Builds the star tree using Star-Tree Document * diff --git a/server/src/main/java/org/opensearch/index/engine/TranslogLeafReader.java b/server/src/main/java/org/opensearch/index/engine/TranslogLeafReader.java index dea389bb6a0ff..94b8c6181de4e 100644 --- a/server/src/main/java/org/opensearch/index/engine/TranslogLeafReader.java +++ b/server/src/main/java/org/opensearch/index/engine/TranslogLeafReader.java @@ -264,13 +264,13 @@ public CacheHelper getReaderCacheHelper() { } @Override - public FloatVectorValues getFloatVectorValues(String field) throws IOException { - return getFloatVectorValues(field); + public FloatVectorValues getFloatVectorValues(String field) { + throw new UnsupportedOperationException(); } @Override - public ByteVectorValues getByteVectorValues(String field) throws IOException { - return getByteVectorValues(field); + public ByteVectorValues getByteVectorValues(String field) { + throw new UnsupportedOperationException(); } @Override diff --git a/server/src/main/java/org/opensearch/index/mapper/DerivedFieldType.java b/server/src/main/java/org/opensearch/index/mapper/DerivedFieldType.java index e230e37e6d826..fe81f19d74b21 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DerivedFieldType.java +++ b/server/src/main/java/org/opensearch/index/mapper/DerivedFieldType.java @@ -159,10 +159,9 @@ public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName, Supplier<SearchLookup> searchLookup) { @Override public Query termQuery(Object value, QueryShardContext context) { Query query = typeFieldMapper.mappedFieldType.termQuery(value, context); - DerivedFieldValueFetcher valueFetcher = valueFetcher(context, context.lookup(), null); DerivedFieldQuery derivedFieldQuery = new DerivedFieldQuery( query, - valueFetcher, + () -> valueFetcher(context, context.lookup(), null), context.lookup(), getIndexAnalyzer(), indexableFieldGenerator, @@ -176,10 +175,9 @@ public Query termQuery(Object value, QueryShardContext context) { @Override public Query termQueryCaseInsensitive(Object value, @Nullable QueryShardContext context) { Query query = typeFieldMapper.mappedFieldType.termQueryCaseInsensitive(value, context); - DerivedFieldValueFetcher valueFetcher = valueFetcher(context, context.lookup(), null); DerivedFieldQuery derivedFieldQuery = new DerivedFieldQuery( query, - valueFetcher, + () -> valueFetcher(context, context.lookup(), null), context.lookup(), getIndexAnalyzer(), indexableFieldGenerator, @@ -195,10 +193,9 @@ public Query termQueryCaseInsensitive(Object value, @Nullable QueryShardContext @Override public Query termsQuery(List<?> values, @Nullable QueryShardContext context) { Query query = typeFieldMapper.mappedFieldType.termsQuery(values, context); - DerivedFieldValueFetcher valueFetcher = valueFetcher(context, context.lookup(), null); DerivedFieldQuery derivedFieldQuery = new
DerivedFieldQuery( query, - valueFetcher, + () -> valueFetcher(context, context.lookup(), null), context.lookup(), getIndexAnalyzer(), indexableFieldGenerator, @@ -230,10 +227,9 @@ public Query rangeQuery( parser, context ); - DerivedFieldValueFetcher valueFetcher = valueFetcher(context, context.lookup(), null); return new DerivedFieldQuery( query, - valueFetcher, + () -> valueFetcher(context, context.lookup(), null), context.lookup(), getIndexAnalyzer(), indexableFieldGenerator, @@ -251,10 +247,9 @@ public Query fuzzyQuery( QueryShardContext context ) { Query query = typeFieldMapper.mappedFieldType.fuzzyQuery(value, fuzziness, prefixLength, maxExpansions, transpositions, context); - DerivedFieldValueFetcher valueFetcher = valueFetcher(context, context.lookup(), null); DerivedFieldQuery derivedFieldQuery = new DerivedFieldQuery( query, - valueFetcher, + () -> valueFetcher(context, context.lookup(), null), context.lookup(), getIndexAnalyzer(), indexableFieldGenerator, @@ -289,10 +284,9 @@ public Query fuzzyQuery( method, context ); - DerivedFieldValueFetcher valueFetcher = valueFetcher(context, context.lookup(), null); DerivedFieldQuery derivedFieldQuery = new DerivedFieldQuery( query, - valueFetcher, + () -> valueFetcher(context, context.lookup(), null), context.lookup(), getIndexAnalyzer(), indexableFieldGenerator, @@ -316,10 +310,9 @@ public Query prefixQuery( QueryShardContext context ) { Query query = typeFieldMapper.mappedFieldType.prefixQuery(value, method, caseInsensitive, context); - DerivedFieldValueFetcher valueFetcher = valueFetcher(context, context.lookup(), null); DerivedFieldQuery derivedFieldQuery = new DerivedFieldQuery( query, - valueFetcher, + () -> valueFetcher(context, context.lookup(), null), context.lookup(), getIndexAnalyzer(), indexableFieldGenerator, @@ -343,10 +336,9 @@ public Query wildcardQuery( QueryShardContext context ) { Query query = typeFieldMapper.mappedFieldType.wildcardQuery(value, method, caseInsensitive, context); - DerivedFieldValueFetcher valueFetcher = valueFetcher(context, context.lookup(), null); DerivedFieldQuery derivedFieldQuery = new DerivedFieldQuery( query, - valueFetcher, + () -> valueFetcher(context, context.lookup(), null), context.lookup(), getIndexAnalyzer(), indexableFieldGenerator, @@ -365,10 +357,9 @@ public Query wildcardQuery( @Override public Query normalizedWildcardQuery(String value, @Nullable MultiTermQuery.RewriteMethod method, QueryShardContext context) { Query query = typeFieldMapper.mappedFieldType.normalizedWildcardQuery(value, method, context); - DerivedFieldValueFetcher valueFetcher = valueFetcher(context, context.lookup(), null); DerivedFieldQuery derivedFieldQuery = new DerivedFieldQuery( query, - valueFetcher, + () -> valueFetcher(context, context.lookup(), null), context.lookup(), getIndexAnalyzer(), indexableFieldGenerator, @@ -394,10 +385,9 @@ public Query regexpQuery( QueryShardContext context ) { Query query = typeFieldMapper.mappedFieldType.regexpQuery(value, syntaxFlags, matchFlags, maxDeterminizedStates, method, context); - DerivedFieldValueFetcher valueFetcher = valueFetcher(context, context.lookup(), null); DerivedFieldQuery derivedFieldQuery = new DerivedFieldQuery( query, - valueFetcher, + () -> valueFetcher(context, context.lookup(), null), context.lookup(), getIndexAnalyzer(), indexableFieldGenerator, @@ -416,10 +406,9 @@ public Query regexpQuery( @Override public Query phraseQuery(TokenStream stream, int slop, boolean enablePositionIncrements, QueryShardContext context) throws IOException { Query query 
= typeFieldMapper.mappedFieldType.phraseQuery(stream, slop, enablePositionIncrements, context); - DerivedFieldValueFetcher valueFetcher = valueFetcher(context, context.lookup(), null); DerivedFieldQuery derivedFieldQuery = new DerivedFieldQuery( query, - valueFetcher, + () -> valueFetcher(context, context.lookup(), null), context.lookup(), getIndexAnalyzer(), indexableFieldGenerator, @@ -441,10 +430,9 @@ public Query phraseQuery(TokenStream stream, int slop, boolean enablePositionInc public Query multiPhraseQuery(TokenStream stream, int slop, boolean enablePositionIncrements, QueryShardContext context) throws IOException { Query query = typeFieldMapper.mappedFieldType.multiPhraseQuery(stream, slop, enablePositionIncrements, context); - DerivedFieldValueFetcher valueFetcher = valueFetcher(context, context.lookup(), null); DerivedFieldQuery derivedFieldQuery = new DerivedFieldQuery( query, - valueFetcher, + () -> valueFetcher(context, context.lookup(), null), context.lookup(), getIndexAnalyzer(), indexableFieldGenerator, @@ -465,10 +453,9 @@ public Query multiPhraseQuery(TokenStream stream, int slop, boolean enablePositi @Override public Query phrasePrefixQuery(TokenStream stream, int slop, int maxExpansions, QueryShardContext context) throws IOException { Query query = typeFieldMapper.mappedFieldType.phrasePrefixQuery(stream, slop, maxExpansions, context); - DerivedFieldValueFetcher valueFetcher = valueFetcher(context, context.lookup(), null); DerivedFieldQuery derivedFieldQuery = new DerivedFieldQuery( query, - valueFetcher, + () -> valueFetcher(context, context.lookup(), null), context.lookup(), getIndexAnalyzer(), indexableFieldGenerator, @@ -493,10 +480,9 @@ public SpanQuery spanPrefixQuery(String value, SpanMultiTermQueryWrapper.SpanRew @Override public Query distanceFeatureQuery(Object origin, String pivot, float boost, QueryShardContext context) { Query query = typeFieldMapper.mappedFieldType.distanceFeatureQuery(origin, pivot, boost, context); - DerivedFieldValueFetcher valueFetcher = valueFetcher(context, context.lookup(), null); return new DerivedFieldQuery( query, - valueFetcher, + () -> valueFetcher(context, context.lookup(), null), context.lookup(), getIndexAnalyzer(), indexableFieldGenerator, @@ -507,10 +493,9 @@ public Query distanceFeatureQuery(Object origin, String pivot, float boost, Quer @Override public Query geoShapeQuery(Geometry shape, String fieldName, ShapeRelation relation, QueryShardContext context) { Query query = ((GeoShapeQueryable) (typeFieldMapper.mappedFieldType)).geoShapeQuery(shape, fieldName, relation, context); - DerivedFieldValueFetcher valueFetcher = valueFetcher(context, context.lookup(), null); return new DerivedFieldQuery( query, - valueFetcher, + () -> valueFetcher(context, context.lookup(), null), context.lookup(), getIndexAnalyzer(), indexableFieldGenerator, diff --git a/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java index b82fa3999612a..bf8f83e1b95df 100644 --- a/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java @@ -568,11 +568,7 @@ protected void parseCreateField(ParseContext context) throws IOException { if (context.externalValueSet()) { String value = context.externalValue().toString(); parseValueAddFields(context, value, fieldType().name()); - } else if (context.parser().currentToken() == XContentParser.Token.VALUE_NULL) { - 
context.parser().nextToken(); // This triggers an exception in DocumentParser. - // We could remove the above nextToken() call to skip the null value, but the existing - // behavior (since 2.7) throws the exception. - } else { + } else if (context.parser().currentToken() != XContentParser.Token.VALUE_NULL) { JsonToStringXContentParser jsonToStringParser = new JsonToStringXContentParser( NamedXContentRegistry.EMPTY, DeprecationHandler.IGNORE_DEPRECATIONS, diff --git a/server/src/main/java/org/opensearch/index/mapper/StarTreeMapper.java b/server/src/main/java/org/opensearch/index/mapper/StarTreeMapper.java index d9539f9dc0c82..e52d6a621e4e8 100644 --- a/server/src/main/java/org/opensearch/index/mapper/StarTreeMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/StarTreeMapper.java @@ -28,6 +28,7 @@ import java.util.Locale; import java.util.Map; import java.util.Optional; +import java.util.Queue; import java.util.Set; import java.util.stream.Collectors; @@ -225,6 +226,10 @@ private List buildMetrics(String fieldName, Map map, Map for (Object metric : metricsList) { Map metricMap = (Map) metric; String name = (String) XContentMapValues.extractValue(CompositeDataCubeFieldType.NAME, metricMap); + // Handle _doc_count metric separately at the end + if (name.equals(DocCountFieldMapper.NAME)) { + continue; + } metricMap.remove(CompositeDataCubeFieldType.NAME); if (objbuilder == null || objbuilder.mappersBuilders == null) { metrics.add(getMetric(name, metricMap, context)); @@ -249,7 +254,8 @@ private List buildMetrics(String fieldName, Map map, Map } else { throw new MapperParsingException(String.format(Locale.ROOT, "unable to parse metrics for star tree field [%s]", this.name)); } - + Metric docCountMetric = new Metric(DocCountFieldMapper.NAME, List.of(MetricStat.DOC_COUNT)); + metrics.add(docCountMetric); return metrics; } @@ -262,17 +268,50 @@ private Metric getMetric(String name, Map metric, Mapper.TypePar .collect(Collectors.toList()); metric.remove(STATS); if (metricStrings.isEmpty()) { - metricTypes = new ArrayList<>(StarTreeIndexSettings.DEFAULT_METRICS_LIST.get(context.getSettings())); - } else { - Set metricSet = new LinkedHashSet<>(); - for (String metricString : metricStrings) { - metricSet.add(MetricStat.fromTypeName(metricString)); - } - metricTypes = new ArrayList<>(metricSet); + metricStrings = new ArrayList<>(StarTreeIndexSettings.DEFAULT_METRICS_LIST.get(context.getSettings())); + } + // Add all required metrics initially + Set metricSet = new LinkedHashSet<>(); + for (String metricString : metricStrings) { + MetricStat metricStat = MetricStat.fromTypeName(metricString); + metricSet.add(metricStat); + addBaseMetrics(metricStat, metricSet); } + addEligibleDerivedMetrics(metricSet); + metricTypes = new ArrayList<>(metricSet); return new Metric(name, metricTypes); } + /** + * Add base metrics of derived metric to metric set + */ + private void addBaseMetrics(MetricStat metricStat, Set metricSet) { + if (metricStat.isDerivedMetric()) { + Queue metricQueue = new LinkedList<>(metricStat.getBaseMetrics()); + while (metricQueue.isEmpty() == false) { + MetricStat metric = metricQueue.poll(); + if (metric.isDerivedMetric() && !metricSet.contains(metric)) { + metricQueue.addAll(metric.getBaseMetrics()); + } + metricSet.add(metric); + } + } + } + + /** + * Add derived metrics if all associated base metrics are present + */ + private void addEligibleDerivedMetrics(Set metricStats) { + for (MetricStat metric : MetricStat.values()) { + if (metric.isDerivedMetric() && 
!metricStats.contains(metric)) { + List sourceMetrics = metric.getBaseMetrics(); + if (metricStats.containsAll(sourceMetrics)) { + metricStats.add(metric); + } + } + } + } + @Override protected List> getParameters() { return List.of(config); diff --git a/server/src/main/java/org/opensearch/index/query/DerivedFieldQuery.java b/server/src/main/java/org/opensearch/index/query/DerivedFieldQuery.java index db943bdef0a12..dcc02726cb0ef 100644 --- a/server/src/main/java/org/opensearch/index/query/DerivedFieldQuery.java +++ b/server/src/main/java/org/opensearch/index/query/DerivedFieldQuery.java @@ -30,6 +30,7 @@ import java.util.List; import java.util.Objects; import java.util.function.Function; +import java.util.function.Supplier; /** * DerivedFieldQuery used for querying derived fields. It contains the logic to execute an input lucene query against @@ -37,7 +38,7 @@ */ public final class DerivedFieldQuery extends Query { private final Query query; - private final DerivedFieldValueFetcher valueFetcher; + private final Supplier valueFetcherSupplier; private final SearchLookup searchLookup; private final Analyzer indexAnalyzer; private final boolean ignoreMalformed; @@ -46,20 +47,19 @@ public final class DerivedFieldQuery extends Query { /** * @param query lucene query to be executed against the derived field - * @param valueFetcher DerivedFieldValueFetcher ValueFetcher to fetch the value of a derived field from _source - * using LeafSearchLookup + * @param valueFetcherSupplier Supplier of a DerivedFieldValueFetcher that will be reconstructed per leaf * @param searchLookup SearchLookup to get the LeafSearchLookup look used by valueFetcher to fetch the _source */ public DerivedFieldQuery( Query query, - DerivedFieldValueFetcher valueFetcher, + Supplier valueFetcherSupplier, SearchLookup searchLookup, Analyzer indexAnalyzer, Function indexableFieldGenerator, boolean ignoreMalformed ) { this.query = query; - this.valueFetcher = valueFetcher; + this.valueFetcherSupplier = valueFetcherSupplier; this.searchLookup = searchLookup; this.indexAnalyzer = indexAnalyzer; this.indexableFieldGenerator = indexableFieldGenerator; @@ -77,7 +77,14 @@ public Query rewrite(IndexSearcher indexSearcher) throws IOException { if (rewritten == query) { return this; } - return new DerivedFieldQuery(rewritten, valueFetcher, searchLookup, indexAnalyzer, indexableFieldGenerator, ignoreMalformed); + return new DerivedFieldQuery( + rewritten, + valueFetcherSupplier, + searchLookup, + indexAnalyzer, + indexableFieldGenerator, + ignoreMalformed + ); } @Override @@ -88,6 +96,11 @@ public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float bo public Scorer scorer(LeafReaderContext context) { DocIdSetIterator approximation; approximation = DocIdSetIterator.all(context.reader().maxDoc()); + + // Create a new ValueFetcher per thread. + // ValueFetcher.setNextReader creates a DerivedFieldScript and, internally, a SourceLookup; these objects are not + // thread safe. 
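+ // (For example, with concurrent segment search two slices may call scorer() in parallel; each call below gets + // its own fetcher from the supplier instead of sharing one mutable fetcher across threads.)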
+ final DerivedFieldValueFetcher valueFetcher = valueFetcherSupplier.get(); valueFetcher.setNextReader(context); LeafSearchLookup leafSearchLookup = searchLookup.getLeafSearchLookup(context); TwoPhaseIterator twoPhase = new TwoPhaseIterator(approximation) { diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java b/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java index a5e0c10f72301..b2bc8a0294a49 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java @@ -20,20 +20,27 @@ import org.opensearch.cluster.routing.RoutingTable; import org.opensearch.common.collect.Tuple; import org.opensearch.common.settings.Settings; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; import org.opensearch.node.remotestore.RemoteStoreNodeService; +import org.opensearch.node.remotestore.RemoteStorePinnedTimestampService; import java.nio.ByteBuffer; +import java.util.ArrayList; import java.util.Arrays; import java.util.Base64; import java.util.Collection; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.Optional; +import java.util.Set; +import java.util.TreeSet; import java.util.function.Function; import java.util.stream.Collectors; @@ -373,4 +380,173 @@ public static boolean isSwitchToStrictCompatibilityMode(ClusterUpdateSettingsReq incomingSettings ) == RemoteStoreNodeService.CompatibilityMode.STRICT; } + + /** + * Determines and returns a set of metadata files that match provided pinned timestamps. + * + * This method is an overloaded version of getPinnedTimestampLockedFiles and does not use cached entries to find + * the metadata files. + * + * @param metadataFiles A list of metadata file names. Expected to be sorted in descending order of timestamp. + * @param pinnedTimestampSet A set of timestamps representing pinned points in time. + * @param getTimestampFunction A function that extracts the timestamp from a metadata file name. + * @param prefixFunction A function that extracts a tuple of prefix information from a metadata file name. + * @return A set of metadata file names that are implicitly locked based on the pinned timestamps. + */ + public static Set getPinnedTimestampLockedFiles( + List metadataFiles, + Set pinnedTimestampSet, + Function getTimestampFunction, + Function> prefixFunction + ) { + return getPinnedTimestampLockedFiles(metadataFiles, pinnedTimestampSet, new HashMap<>(), getTimestampFunction, prefixFunction); + } + + /** + * Determines and returns a set of metadata files that match provided pinned timestamps. If the pinned timestamp + * feature is not enabled, this function is a no-op. + * + * This method identifies metadata files that are considered implicitly locked due to their timestamps + * matching or being the closest preceding timestamp to the pinned timestamps. It uses a caching mechanism + * to improve performance for previously processed timestamps. + * + * The method performs the following steps: + * 1. Validates input parameters. + * 2. Updates the cache (metadataFilePinnedTimestampMap) to remove outdated entries. + * 3. Processes cached entries and identifies new timestamps to process. + * 4. For new timestamps, iterates through metadata files to find matching or closest preceding files. + * 5. 
Updates the cache with newly processed timestamps and their corresponding metadata files. + * + * @param metadataFiles A list of metadata file names. Expected to be sorted in descending order of timestamp. + * @param pinnedTimestampSet A set of timestamps representing pinned points in time. + * @param metadataFilePinnedTimestampMap A map used for caching processed timestamps and their corresponding metadata files. + * @param getTimestampFunction A function that extracts the timestamp from a metadata file name. + * @param prefixFunction A function that extracts a tuple of prefix information from a metadata file name. + * @return A set of metadata file names that are implicitly locked based on the pinned timestamps. + * + */ + public static Set getPinnedTimestampLockedFiles( + List metadataFiles, + Set pinnedTimestampSet, + Map metadataFilePinnedTimestampMap, + Function getTimestampFunction, + Function> prefixFunction + ) { + Set implicitLockedFiles = new HashSet<>(); + + if (RemoteStoreSettings.isPinnedTimestampsEnabled() == false) { + return implicitLockedFiles; + } + + if (metadataFiles == null || metadataFiles.isEmpty() || pinnedTimestampSet == null) { + return implicitLockedFiles; + } + + // Remove entries for timestamps that are no longer pinned + metadataFilePinnedTimestampMap.keySet().retainAll(pinnedTimestampSet); + + // Add cached entries and collect new timestamps + Set newPinnedTimestamps = new TreeSet<>(Collections.reverseOrder()); + for (Long pinnedTimestamp : pinnedTimestampSet) { + String cachedFile = metadataFilePinnedTimestampMap.get(pinnedTimestamp); + if (cachedFile != null) { + implicitLockedFiles.add(cachedFile); + } else { + newPinnedTimestamps.add(pinnedTimestamp); + } + } + + if (newPinnedTimestamps.isEmpty()) { + return implicitLockedFiles; + } + + // Sort metadata files in descending order of timestamp + // ToDo: Do we really need this? Files fetched from remote store are already lexicographically sorted. + metadataFiles.sort(String::compareTo); + + // If we have metadata files from multiple writers, it can result in picking file generated by stale primary. + // To avoid this, we fail fast. + RemoteStoreUtils.verifyNoMultipleWriters(metadataFiles, prefixFunction); + + Iterator timestampIterator = newPinnedTimestamps.iterator(); + Long currentPinnedTimestamp = timestampIterator.next(); + long prevMdTimestamp = Long.MAX_VALUE; + for (String metadataFileName : metadataFiles) { + long currentMdTimestamp = getTimestampFunction.apply(metadataFileName); + // We always prefer md file with higher values of prefix like primary term, generation etc. + if (currentMdTimestamp > prevMdTimestamp) { + continue; + } + while (currentMdTimestamp <= currentPinnedTimestamp && prevMdTimestamp > currentPinnedTimestamp) { + implicitLockedFiles.add(metadataFileName); + // Do not cache entry for latest metadata file as the next metadata can also match the same pinned timestamp + if (prevMdTimestamp != Long.MAX_VALUE) { + metadataFilePinnedTimestampMap.put(currentPinnedTimestamp, metadataFileName); + } + if (timestampIterator.hasNext() == false) { + return implicitLockedFiles; + } + currentPinnedTimestamp = timestampIterator.next(); + } + prevMdTimestamp = currentMdTimestamp; + } + + return implicitLockedFiles; + } + + /** + * Filters out metadata files based on their age and pinned timestamps settings. 
+ * + * This method filters a list of metadata files, keeping only those that are older + * than a certain threshold determined by the last successful fetch of pinned timestamps + * and a configured lookback interval. + * + * @param metadataFiles A list of metadata file names to be filtered. + * @param getTimestampFunction A function that extracts a timestamp from a metadata file name. + * @param lastSuccessfulFetchOfPinnedTimestamps The timestamp of the last successful fetch of pinned timestamps. + * @return A new list containing only the metadata files that meet the age criteria. + * If pinned timestamps are not enabled, returns a copy of the input list. + */ + public static List filterOutMetadataFilesBasedOnAge( + List metadataFiles, + Function getTimestampFunction, + long lastSuccessfulFetchOfPinnedTimestamps + ) { + if (RemoteStoreSettings.isPinnedTimestampsEnabled() == false) { + return new ArrayList<>(metadataFiles); + } + long maximumAllowedTimestamp = lastSuccessfulFetchOfPinnedTimestamps - RemoteStoreSettings.getPinnedTimestampsLookbackInterval() + .getMillis(); + List metadataFilesWithMinAge = new ArrayList<>(); + for (String metadataFileName : metadataFiles) { + long metadataTimestamp = getTimestampFunction.apply(metadataFileName); + if (metadataTimestamp < maximumAllowedTimestamp) { + metadataFilesWithMinAge.add(metadataFileName); + } + } + return metadataFilesWithMinAge; + } + + /** + * Determines if the pinned timestamp state is stale. + * + * This method checks whether the last successful fetch of pinned timestamps + * is considered stale based on the current time and configured intervals. + * The state is considered stale if the last successful fetch occurred before + * a certain threshold, which is calculated as three times the scheduler interval + * plus the lookback interval. + * + * @return true if the pinned timestamp state is stale, false otherwise. + * Always returns false if pinned timestamps are not enabled. 
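+ * For example (illustrative numbers only): with a 3-minute scheduler interval and a 1-minute lookback, the + * state is considered stale once the last successful fetch is more than 10 minutes old (3 * 3 + 1).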
+ */ + public static boolean isPinnedTimestampStateStale() { + if (RemoteStoreSettings.isPinnedTimestampsEnabled() == false) { + return false; + } + long lastSuccessfulFetchTimestamp = RemoteStorePinnedTimestampService.getPinnedTimestamps().v1(); + long staleBufferInMillis = (RemoteStoreSettings.getPinnedTimestampsSchedulerInterval().millis() * 3) + RemoteStoreSettings + .getPinnedTimestampsLookbackInterval() + .millis(); + return lastSuccessfulFetchTimestamp < (System.currentTimeMillis() - staleBufferInMillis); + } } diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java index 8c0ecb4cc783a..26871429e41d6 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java @@ -40,6 +40,7 @@ import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata; import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadataHandler; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.node.remotestore.RemoteStorePinnedTimestampService; import org.opensearch.threadpool.ThreadPool; import java.io.FileNotFoundException; @@ -91,6 +92,8 @@ public final class RemoteSegmentStoreDirectory extends FilterDirectory implement private final RemoteStoreLockManager mdLockManager; + private final Map metadataFilePinnedTimestampMap; + private final ThreadPool threadPool; /** @@ -132,6 +135,7 @@ public RemoteSegmentStoreDirectory( this.remoteMetadataDirectory = remoteMetadataDirectory; this.mdLockManager = mdLockManager; this.threadPool = threadPool; + this.metadataFilePinnedTimestampMap = new HashMap<>(); this.logger = Loggers.getLogger(getClass(), shardId); init(); } @@ -176,6 +180,42 @@ public RemoteSegmentMetadata initializeToSpecificCommit(long primaryTerm, long c return remoteSegmentMetadata; } + /** + * Initializes the remote segment metadata to a specific timestamp. + * + * @param timestamp The timestamp to initialize the remote segment metadata to. + * @return The RemoteSegmentMetadata object corresponding to the specified timestamp, or null if no metadata file is found for that timestamp. + * @throws IOException If an I/O error occurs while reading the metadata file. 
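+ * An IOException is also thrown when more than one metadata file matches the given timestamp (see the size check below).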
+ */ + public RemoteSegmentMetadata initializeToSpecificTimestamp(long timestamp) throws IOException { + List metadataFiles = remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( + MetadataFilenameUtils.METADATA_PREFIX, + Integer.MAX_VALUE + ); + Set lockedMetadataFiles = RemoteStoreUtils.getPinnedTimestampLockedFiles( + metadataFiles, + Set.of(timestamp), + MetadataFilenameUtils::getTimestamp, + MetadataFilenameUtils::getNodeIdByPrimaryTermAndGen + ); + if (lockedMetadataFiles.isEmpty()) { + return null; + } + if (lockedMetadataFiles.size() > 1) { + throw new IOException( + "Expected exactly one metadata file matching timestamp: " + timestamp + " but got " + lockedMetadataFiles + ); + } + String metadataFile = lockedMetadataFiles.iterator().next(); + RemoteSegmentMetadata remoteSegmentMetadata = readMetadataFile(metadataFile); + if (remoteSegmentMetadata != null) { + this.segmentsUploadedToRemoteStore = new ConcurrentHashMap<>(remoteSegmentMetadata.getMetadata()); + } else { + this.segmentsUploadedToRemoteStore = new ConcurrentHashMap<>(); + } + return remoteSegmentMetadata; + } + /** * Read the latest metadata file to get the list of segments uploaded to the remote segment store. * We upload a metadata file per refresh, but it is not unique per refresh. Refresh metadata file is unique for a given commit. @@ -324,7 +364,8 @@ public static String getMetadataFilename( long translogGeneration, long uploadCounter, int metadataVersion, - String nodeId + String nodeId, + long creationTimestamp ) { return String.join( SEPARATOR, @@ -334,11 +375,30 @@ public static String getMetadataFilename( RemoteStoreUtils.invertLong(translogGeneration), RemoteStoreUtils.invertLong(uploadCounter), String.valueOf(Objects.hash(nodeId)), - RemoteStoreUtils.invertLong(System.currentTimeMillis()), + RemoteStoreUtils.invertLong(creationTimestamp), String.valueOf(metadataVersion) ); } + public static String getMetadataFilename( + long primaryTerm, + long generation, + long translogGeneration, + long uploadCounter, + int metadataVersion, + String nodeId + ) { + return getMetadataFilename( + primaryTerm, + generation, + translogGeneration, + uploadCounter, + metadataVersion, + nodeId, + System.currentTimeMillis() + ); + } + // Visible for testing static long getPrimaryTerm(String[] filenameTokens) { return RemoteStoreUtils.invertLong(filenameTokens[1]); @@ -349,6 +409,11 @@ static long getGeneration(String[] filenameTokens) { return RemoteStoreUtils.invertLong(filenameTokens[2]); } + public static long getTimestamp(String filename) { + String[] filenameTokens = filename.split(SEPARATOR); + return RemoteStoreUtils.invertLong(filenameTokens[6]); + } + public static Tuple getNodeIdByPrimaryTermAndGen(String filename) { String[] tokens = filename.split(SEPARATOR); if (tokens.length < 8) { @@ -773,6 +838,7 @@ public void deleteStaleSegments(int lastNMetadataFilesToKeep) throws IOException ); return; } + List sortedMetadataFileList = remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( MetadataFilenameUtils.METADATA_PREFIX, Integer.MAX_VALUE @@ -786,16 +852,44 @@ public void deleteStaleSegments(int lastNMetadataFilesToKeep) throws IOException return; } - List metadataFilesEligibleToDelete = new ArrayList<>( - sortedMetadataFileList.subList(lastNMetadataFilesToKeep, sortedMetadataFileList.size()) + // Check last fetch status of pinned timestamps. If stale, return. 
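+ // (If the last fetch is stale, the local view of pinned timestamps may be missing recent pins, and deleting + // now could remove a metadata file that was just pinned.)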
+ if (RemoteStoreUtils.isPinnedTimestampStateStale()) { + logger.warn("Skipping remote segment store garbage collection as last fetch of pinned timestamp is stale"); + return; + } + + Tuple> pinnedTimestampsState = RemoteStorePinnedTimestampService.getPinnedTimestamps(); + + Set implicitLockedFiles = RemoteStoreUtils.getPinnedTimestampLockedFiles( + sortedMetadataFileList, + pinnedTimestampsState.v2(), + metadataFilePinnedTimestampMap, + MetadataFilenameUtils::getTimestamp, + MetadataFilenameUtils::getNodeIdByPrimaryTermAndGen ); - Set allLockFiles; + final Set allLockFiles = new HashSet<>(implicitLockedFiles); + try { - allLockFiles = ((RemoteStoreMetadataLockManager) mdLockManager).fetchLockedMetadataFiles(MetadataFilenameUtils.METADATA_PREFIX); + allLockFiles.addAll( + ((RemoteStoreMetadataLockManager) mdLockManager).fetchLockedMetadataFiles(MetadataFilenameUtils.METADATA_PREFIX) + ); } catch (Exception e) { logger.error("Exception while fetching segment metadata lock files, skipping deleteStaleSegments", e); return; } + + List metadataFilesEligibleToDelete = new ArrayList<>( + sortedMetadataFileList.subList(lastNMetadataFilesToKeep, sortedMetadataFileList.size()) + ); + + // Along with last N files, we need to keep files since last successful run of scheduler + long lastSuccessfulFetchOfPinnedTimestamps = pinnedTimestampsState.v1(); + metadataFilesEligibleToDelete = RemoteStoreUtils.filterOutMetadataFilesBasedOnAge( + metadataFilesEligibleToDelete, + MetadataFilenameUtils::getTimestamp, + lastSuccessfulFetchOfPinnedTimestamps + ); + List metadataFilesToBeDeleted = metadataFilesEligibleToDelete.stream() .filter(metadataFile -> allLockFiles.contains(metadataFile) == false) .collect(Collectors.toList()); diff --git a/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java b/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java index 93946fa11de13..71f8cf5a78ec5 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java +++ b/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java @@ -310,12 +310,11 @@ BytesReference getOrCompute( * @param cacheKey the cache key to invalidate */ void invalidate(IndicesService.IndexShardCacheEntity cacheEntity, DirectoryReader reader, BytesReference cacheKey) { - assert reader.getReaderCacheHelper() != null; - String readerCacheKeyId = null; - if (reader instanceof OpenSearchDirectoryReader) { - IndexReader.CacheHelper cacheHelper = ((OpenSearchDirectoryReader) reader).getDelegatingCacheHelper(); - readerCacheKeyId = ((OpenSearchDirectoryReader.DelegatingCacheHelper) cacheHelper).getDelegatingCacheKey().getId(); - } + assert reader.getReaderCacheHelper() instanceof OpenSearchDirectoryReader.DelegatingCacheHelper; + OpenSearchDirectoryReader.DelegatingCacheHelper delegatingCacheHelper = (OpenSearchDirectoryReader.DelegatingCacheHelper) reader + .getReaderCacheHelper(); + String readerCacheKeyId = delegatingCacheHelper.getDelegatingCacheKey().getId(); + IndexShard indexShard = (IndexShard) cacheEntity.getCacheIdentity(); cache.invalidate(getICacheKey(new Key(indexShard.shardId(), cacheKey, readerCacheKeyId, System.identityHashCode(indexShard)))); } diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index 902ca95643625..a78328e24c588 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -68,6 +68,7 @@ import 
org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lifecycle.AbstractLifecycleComponent; +import org.opensearch.common.lucene.index.OpenSearchDirectoryReader.DelegatingCacheHelper; import org.opensearch.common.settings.IndexScopedSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; @@ -1754,8 +1755,7 @@ public boolean canCache(ShardSearchRequest request, SearchContext context) { if (context.getQueryShardContext().isCacheable() == false) { return false; } - return true; - + return context.searcher().getDirectoryReader().getReaderCacheHelper() instanceof DelegatingCacheHelper; } /** diff --git a/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java b/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java index 495288626627b..55280ca5c96d6 100644 --- a/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java +++ b/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java @@ -134,6 +134,15 @@ public class RemoteStoreSettings { Property.Dynamic ); + /** + * Controls pinned timestamp feature enablement + */ + public static final Setting CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED = Setting.boolSetting( + "cluster.remote_store.pinned_timestamps.enabled", + false, + Setting.Property.NodeScope + ); + /** * Controls pinned timestamp scheduler interval */ @@ -163,6 +172,7 @@ public class RemoteStoreSettings { private volatile RemoteStoreEnums.PathHashAlgorithm pathHashAlgorithm; private volatile int maxRemoteTranslogReaders; private volatile boolean isTranslogMetadataEnabled; + private static volatile boolean isPinnedTimestampsEnabled; private static volatile TimeValue pinnedTimestampsSchedulerInterval; private static volatile TimeValue pinnedTimestampsLookbackInterval; @@ -205,6 +215,7 @@ public RemoteStoreSettings(Settings settings, ClusterSettings clusterSettings) { pinnedTimestampsSchedulerInterval = CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_SCHEDULER_INTERVAL.get(settings); pinnedTimestampsLookbackInterval = CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_LOOKBACK_INTERVAL.get(settings); + isPinnedTimestampsEnabled = CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED.get(settings); } public TimeValue getClusterRemoteTranslogBufferInterval() { @@ -280,4 +291,8 @@ public static TimeValue getPinnedTimestampsSchedulerInterval() { public static TimeValue getPinnedTimestampsLookbackInterval() { return pinnedTimestampsLookbackInterval; } + + public static boolean isPinnedTimestampsEnabled() { + return isPinnedTimestampsEnabled; + } } diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 1dfe77093f051..388e00bedab0c 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -306,6 +306,7 @@ import static org.opensearch.common.util.FeatureFlags.TELEMETRY; import static org.opensearch.env.NodeEnvironment.collectFileCacheDataPath; import static org.opensearch.index.ShardIndexingPressureSettings.SHARD_INDEXING_PRESSURE_ENABLED_ATTRIBUTE_KEY; +import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteStoreAttributePresent; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteStoreClusterStateEnabled; @@ -813,7 +814,7 @@ protected Node( 
remoteClusterStateCleanupManager = null; } final RemoteStorePinnedTimestampService remoteStorePinnedTimestampService; - if (isRemoteStoreAttributePresent(settings)) { + if (isRemoteStoreAttributePresent(settings) && CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED.get(settings)) { remoteStorePinnedTimestampService = new RemoteStorePinnedTimestampService( repositoriesServiceReference::get, settings, diff --git a/server/src/main/java/org/opensearch/node/remotestore/RemoteStorePinnedTimestampService.java b/server/src/main/java/org/opensearch/node/remotestore/RemoteStorePinnedTimestampService.java index c37db618c2522..f7b262664d147 100644 --- a/server/src/main/java/org/opensearch/node/remotestore/RemoteStorePinnedTimestampService.java +++ b/server/src/main/java/org/opensearch/node/remotestore/RemoteStorePinnedTimestampService.java @@ -219,9 +219,9 @@ private ActionListener getListenerForWriteCallResponse( private PinnedTimestamps readExistingPinnedTimestamps(String blobFilename, RemotePinnedTimestamps remotePinnedTimestamps) { remotePinnedTimestamps.setBlobFileName(blobFilename); - remotePinnedTimestamps.setFullBlobName(pinnedTimestampsBlobStore.getBlobPathForUpload(remotePinnedTimestamps)); + remotePinnedTimestamps.setFullBlobName(pinnedTimestampsBlobStore().getBlobPathForUpload(remotePinnedTimestamps)); try { - return pinnedTimestampsBlobStore.read(remotePinnedTimestamps); + return pinnedTimestampsBlobStore().read(remotePinnedTimestamps); } catch (IOException e) { throw new RuntimeException("Failed to read existing pinned timestamps", e); } @@ -245,6 +245,14 @@ public static Tuple> getPinnedTimestamps() { return pinnedTimestampsSet; } + public RemoteStorePinnedTimestampsBlobStore pinnedTimestampsBlobStore() { + return pinnedTimestampsBlobStore; + } + + public BlobStoreTransferService blobStoreTransferService() { + return blobStoreTransferService; + } + /** * Inner class for asynchronously updating the pinned timestamp set. 
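* On each run it lists the single latest pinned-timestamps blob, reads it, and refreshes the static (fetch-time, pinned-set) tuple; with this change an empty listing also records the trigger timestamp together with an empty set.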
*/ @@ -266,11 +274,12 @@ protected void runInternal() { clusterService.state().metadata().clusterUUID(), blobStoreRepository.getCompressor() ); - BlobPath path = pinnedTimestampsBlobStore.getBlobPathForUpload(remotePinnedTimestamps); - blobStoreTransferService.listAllInSortedOrder(path, remotePinnedTimestamps.getType(), 1, new ActionListener<>() { + BlobPath path = pinnedTimestampsBlobStore().getBlobPathForUpload(remotePinnedTimestamps); + blobStoreTransferService().listAllInSortedOrder(path, remotePinnedTimestamps.getType(), 1, new ActionListener<>() { @Override public void onResponse(List blobMetadata) { if (blobMetadata.isEmpty()) { + pinnedTimestampsSet = new Tuple<>(triggerTimestamp, Set.of()); return; } PinnedTimestamps pinnedTimestamps = readExistingPinnedTimestamps(blobMetadata.get(0).name(), remotePinnedTimestamps); diff --git a/server/src/main/java/org/opensearch/repositories/FilterRepository.java b/server/src/main/java/org/opensearch/repositories/FilterRepository.java index d700a92ed4bad..114cd0260fcca 100644 --- a/server/src/main/java/org/opensearch/repositories/FilterRepository.java +++ b/server/src/main/java/org/opensearch/repositories/FilterRepository.java @@ -39,6 +39,7 @@ import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.Priority; import org.opensearch.common.lifecycle.Lifecycle; import org.opensearch.common.lifecycle.LifecycleListener; import org.opensearch.core.action.ActionListener; @@ -104,6 +105,7 @@ public void finalizeSnapshot( SnapshotInfo snapshotInfo, Version repositoryMetaVersion, Function stateTransformer, + Priority repositoryUpdatePriority, ActionListener listener ) { in.finalizeSnapshot( @@ -113,6 +115,7 @@ public void finalizeSnapshot( snapshotInfo, repositoryMetaVersion, stateTransformer, + repositoryUpdatePriority, listener ); } diff --git a/server/src/main/java/org/opensearch/repositories/Repository.java b/server/src/main/java/org/opensearch/repositories/Repository.java index ed30aad7b4dd2..637503d3f54df 100644 --- a/server/src/main/java/org/opensearch/repositories/Repository.java +++ b/server/src/main/java/org/opensearch/repositories/Repository.java @@ -41,6 +41,7 @@ import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.Nullable; +import org.opensearch.common.Priority; import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lifecycle.LifecycleComponent; import org.opensearch.common.settings.Setting; @@ -150,6 +151,7 @@ default Repository create(RepositoryMetadata metadata, Function stateTransformer, + Priority repositoryUpdatePriority, ActionListener listener ); diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java index 3e6a75565891f..e18706824d39d 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java @@ -65,6 +65,7 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Nullable; import org.opensearch.common.Numbers; +import org.opensearch.common.Priority; import org.opensearch.common.SetOnce; import org.opensearch.common.UUIDs; import org.opensearch.common.blobstore.BlobContainer; @@ -266,6 +267,8 @@ public abstract class 
BlobStoreRepository extends AbstractLifecycleComponent imp public static final Setting REMOTE_STORE_INDEX_SHALLOW_COPY = Setting.boolSetting("remote_store_index_shallow_copy", false); + public static final Setting SHALLOW_SNAPSHOT_V2 = Setting.boolSetting("shallow_snapshot_v2", false); + /** * Setting to set batch size of stale snapshot shard blobs that will be deleted by snapshot workers as part of snapshot deletion. * For optimal performance the value of the setting should be equal to or close to repository's max # of keys that can be deleted in single operation @@ -1046,6 +1049,7 @@ private void doDeleteShardSnapshots( repositoryStateId, repoMetaVersion, Function.identity(), + Priority.NORMAL, ActionListener.wrap(writeUpdatedRepoDataStep::onResponse, listener::onFailure) ); }, listener::onFailure); @@ -1520,6 +1524,7 @@ public void cleanup( repositoryStateId, repositoryMetaVersion, Function.identity(), + Priority.NORMAL, ActionListener.wrap( v -> cleanupStaleBlobs( Collections.emptyList(), @@ -1723,6 +1728,7 @@ public void finalizeSnapshot( SnapshotInfo snapshotInfo, Version repositoryMetaVersion, Function stateTransformer, + Priority repositoryUpdatePriority, final ActionListener listener ) { assert repositoryStateId > RepositoryData.UNKNOWN_REPO_GEN : "Must finalize based on a valid repository generation but received [" @@ -1759,6 +1765,7 @@ public void finalizeSnapshot( repositoryStateId, repositoryMetaVersion, stateTransformer, + repositoryUpdatePriority, ActionListener.wrap(newRepoData -> { cleanupOldShardGens(existingRepositoryData, updatedRepositoryData); listener.onResponse(newRepoData); @@ -2280,10 +2287,11 @@ public boolean isSystemRepository() { * Lastly, the {@link RepositoryMetadata} entry for this repository is updated to the new generation {@code P + 1} and thus * pending and safe generation are set to the same value marking the end of the update of the repository data. * - * @param repositoryData RepositoryData to write - * @param expectedGen expected repository generation at the start of the operation - * @param version version of the repository metadata to write - * @param stateFilter filter for the last cluster state update executed by this method + * @param repositoryData RepositoryData to write + * @param expectedGen expected repository generation at the start of the operation + * @param version version of the repository metadata to write + * @param stateFilter filter for the last cluster state update executed by this method + * @param repositoryUpdatePriority priority for the cluster state update task * @param listener completion listener */ protected void writeIndexGen( @@ -2291,6 +2299,7 @@ protected void writeIndexGen( long expectedGen, Version version, Function stateFilter, + Priority repositoryUpdatePriority, ActionListener listener ) { assert isReadOnly() == false; // can not write to a read only repository @@ -2315,7 +2324,7 @@ protected void writeIndexGen( final StepListener setPendingStep = new StepListener<>(); clusterService.submitStateUpdateTask( "set pending repository generation [" + metadata.name() + "][" + expectedGen + "]", - new ClusterStateUpdateTask() { + new ClusterStateUpdateTask(repositoryUpdatePriority) { private long newGen; @@ -2453,7 +2462,7 @@ public void onFailure(Exception e) { // Step 3: Update CS to reflect new repository generation. 
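// This cluster state update now runs at the caller-supplied repositoryUpdatePriority (propagated from writeIndexGen) rather than always at the default NORMAL priority.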
clusterService.submitStateUpdateTask( "set safe repository generation [" + metadata.name() + "][" + newGen + "]", - new ClusterStateUpdateTask() { + new ClusterStateUpdateTask(repositoryUpdatePriority) { @Override public ClusterState execute(ClusterState currentState) { final RepositoryMetadata meta = getRepoMetadata(currentState); diff --git a/server/src/main/java/org/opensearch/rest/action/document/RestBulkStreamingAction.java b/server/src/main/java/org/opensearch/rest/action/document/RestBulkStreamingAction.java index a38244fe9ff20..2e0d1b8ead814 100644 --- a/server/src/main/java/org/opensearch/rest/action/document/RestBulkStreamingAction.java +++ b/server/src/main/java/org/opensearch/rest/action/document/RestBulkStreamingAction.java @@ -36,6 +36,7 @@ import java.io.IOException; import java.io.UncheckedIOException; +import java.time.Duration; import java.util.List; import java.util.Map; import java.util.concurrent.CompletableFuture; @@ -95,6 +96,17 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC final Boolean defaultRequireAlias = request.paramAsBoolean(DocWriteRequest.REQUIRE_ALIAS, null); final TimeValue timeout = request.paramAsTime("timeout", BulkShardRequest.DEFAULT_TIMEOUT); final String refresh = request.param("refresh"); + final TimeValue batchInterval = request.paramAsTime("batch_interval", null); + final int batchSize = request.paramAsInt("batch_size", 1); /* by default, batch size of 1 */ + final boolean hasBatchSize = request.hasParam("batch_size"); /* whether batch_size was explicitly specified or the default is used */ + + if (batchInterval != null && batchInterval.duration() <= 0) { + throw new IllegalArgumentException("The batch_interval value should be positive [" + batchInterval.millis() + "ms]."); + } + + if (batchSize <= 0) { + throw new IllegalArgumentException("The batch_size value should be positive [" + batchSize + "]."); + } final StreamingRestChannelConsumer consumer = (channel) -> { final MediaType mediaType = request.getMediaType(); @@ -114,39 +126,38 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC // Set the content type and the status code before sending the response stream over channel.prepareResponse(RestStatus.OK, Map.of("Content-Type", List.of(mediaType.mediaTypeWithoutParameters()))); - // This is initial implementation at the moment which transforms each single request stream chunk into - // individual bulk request and streams each response back. Another source of inefficiency comes from converting - // bulk response from raw (json/yaml/...) to model and back to raw (json/yaml/...). 
- // TODOs: - // - add batching (by interval and/or count) // - eliminate serialization inefficiencies - Flux.from(channel).zipWith(Flux.fromStream(Stream.generate(() -> { + createBufferedFlux(batchInterval, batchSize, hasBatchSize, channel).zipWith(Flux.fromStream(Stream.generate(() -> { BulkRequest bulkRequest = Requests.bulkRequest(); bulkRequest.waitForActiveShards(prepareBulkRequest.waitForActiveShards()); bulkRequest.timeout(prepareBulkRequest.timeout()); bulkRequest.setRefreshPolicy(prepareBulkRequest.getRefreshPolicy()); return bulkRequest; }))).map(t -> { - final HttpChunk chunk = t.getT1(); + boolean isLast = false; + final List chunks = t.getT1(); final BulkRequest bulkRequest = t.getT2(); - try (chunk) { - bulkRequest.add( - chunk.content(), - defaultIndex, - defaultRouting, - defaultFetchSourceContext, - defaultPipeline, - defaultRequireAlias, - allowExplicitIndex, - request.getMediaType() - ); - } catch (final IOException ex) { - throw new UncheckedIOException(ex); + for (final HttpChunk chunk : chunks) { + isLast |= chunk.isLast(); + try (chunk) { + bulkRequest.add( + chunk.content(), + defaultIndex, + defaultRouting, + defaultFetchSourceContext, + defaultPipeline, + defaultRequireAlias, + allowExplicitIndex, + request.getMediaType() + ); + } catch (final IOException ex) { + throw new UncheckedIOException(ex); + } } - return Tuple.tuple(chunk.isLast(), bulkRequest); + return Tuple.tuple(isLast, bulkRequest); }).flatMap(tuple -> { final CompletableFuture f = new CompletableFuture<>(); @@ -222,4 +233,22 @@ public boolean supportsStreaming() { public boolean allowsUnsafeBuffers() { return true; } + + private Flux> createBufferedFlux( + final TimeValue batchInterval, + final int batchSize, + final boolean hasBatchSize, + StreamingRestChannel channel + ) { + if (batchInterval != null) { + // If non-default batch size is specified, buffer by interval and batch + if (hasBatchSize) { + return Flux.from(channel).bufferTimeout(batchSize, Duration.ofMillis(batchInterval.millis())); + } else { + return Flux.from(channel).buffer(Duration.ofMillis(batchInterval.millis())); + } + } else { + return Flux.from(channel).buffer(batchSize); + } + } } diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotInfo.java b/server/src/main/java/org/opensearch/snapshots/SnapshotInfo.java index 191b872cdd563..7558c4456109e 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotInfo.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotInfo.java @@ -98,6 +98,9 @@ public final class SnapshotInfo implements Comparable, ToXContent, private static final String INCLUDE_GLOBAL_STATE = "include_global_state"; private static final String REMOTE_STORE_INDEX_SHALLOW_COPY = "remote_store_index_shallow_copy"; + + private static final String PINNED_TIMESTAMP = "pinned_timestamp"; + private static final String USER_METADATA = "metadata"; private static final Comparator COMPARATOR = Comparator.comparing(SnapshotInfo::startTime) @@ -121,6 +124,7 @@ public static final class SnapshotInfoBuilder { private Boolean includeGlobalState = null; private Boolean remoteStoreIndexShallowCopy = null; + private long pinnedTimestamp = 0L; private Map userMetadata = null; private int version = -1; private List shardFailures = null; @@ -177,6 +181,10 @@ private void setRemoteStoreIndexShallowCopy(Boolean remoteStoreIndexShallowCopy) this.remoteStoreIndexShallowCopy = remoteStoreIndexShallowCopy; } + private void setPinnedTimestamp(long pinnedTimestamp) { + this.pinnedTimestamp = pinnedTimestamp; + } + 
private void setShardFailures(List shardFailures) { this.shardFailures = shardFailures; } @@ -216,7 +224,8 @@ public SnapshotInfo build() { shardFailures, includeGlobalState, userMetadata, - remoteStoreIndexShallowCopy + remoteStoreIndexShallowCopy, + pinnedTimestamp ); } } @@ -271,6 +280,7 @@ int getSuccessfulShards() { SnapshotInfoBuilder::setRemoteStoreIndexShallowCopy, new ParseField(REMOTE_STORE_INDEX_SHALLOW_COPY) ); + SNAPSHOT_INFO_PARSER.declareLong(SnapshotInfoBuilder::setPinnedTimestamp, new ParseField(PINNED_TIMESTAMP)); SNAPSHOT_INFO_PARSER.declareObjectArray( SnapshotInfoBuilder::setShardFailures, SnapshotShardFailure.SNAPSHOT_SHARD_FAILURE_PARSER, @@ -307,6 +317,7 @@ int getSuccessfulShards() { @Nullable private Boolean remoteStoreIndexShallowCopy; + private long pinnedTimestamp; @Nullable private final Map userMetadata; @@ -316,11 +327,11 @@ int getSuccessfulShards() { private final List shardFailures; public SnapshotInfo(SnapshotId snapshotId, List indices, List dataStreams, SnapshotState state) { - this(snapshotId, indices, dataStreams, state, null, null, 0L, 0L, 0, 0, Collections.emptyList(), null, null, null); + this(snapshotId, indices, dataStreams, state, null, null, 0L, 0L, 0, 0, Collections.emptyList(), null, null, null, 0); } public SnapshotInfo(SnapshotId snapshotId, List indices, List dataStreams, SnapshotState state, Version version) { - this(snapshotId, indices, dataStreams, state, null, version, 0L, 0L, 0, 0, Collections.emptyList(), null, null, null); + this(snapshotId, indices, dataStreams, state, null, version, 0L, 0L, 0, 0, Collections.emptyList(), null, null, null, 0); } public SnapshotInfo(SnapshotsInProgress.Entry entry) { @@ -338,7 +349,8 @@ public SnapshotInfo(SnapshotsInProgress.Entry entry) { Collections.emptyList(), entry.includeGlobalState(), entry.userMetadata(), - entry.remoteStoreIndexShallowCopy() + entry.remoteStoreIndexShallowCopy(), + 0L ); } @@ -353,7 +365,8 @@ public SnapshotInfo( List shardFailures, Boolean includeGlobalState, Map userMetadata, - Boolean remoteStoreIndexShallowCopy + Boolean remoteStoreIndexShallowCopy, + long pinnedTimestamp ) { this( snapshotId, @@ -369,7 +382,8 @@ public SnapshotInfo( shardFailures, includeGlobalState, userMetadata, - remoteStoreIndexShallowCopy + remoteStoreIndexShallowCopy, + pinnedTimestamp ); } @@ -387,7 +401,8 @@ public SnapshotInfo( List shardFailures, Boolean includeGlobalState, Map userMetadata, - Boolean remoteStoreIndexShallowCopy + Boolean remoteStoreIndexShallowCopy, + long pinnedTimestamp ) { this.snapshotId = Objects.requireNonNull(snapshotId); this.indices = Collections.unmodifiableList(Objects.requireNonNull(indices)); @@ -403,6 +418,7 @@ public SnapshotInfo( this.includeGlobalState = includeGlobalState; this.userMetadata = userMetadata; this.remoteStoreIndexShallowCopy = remoteStoreIndexShallowCopy; + this.pinnedTimestamp = pinnedTimestamp; } /** @@ -425,6 +441,9 @@ public SnapshotInfo(final StreamInput in) throws IOException { if (in.getVersion().onOrAfter(Version.V_2_9_0)) { remoteStoreIndexShallowCopy = in.readOptionalBoolean(); } + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + pinnedTimestamp = in.readVLong(); + } } /** @@ -539,6 +558,10 @@ public Boolean isRemoteStoreIndexShallowCopyEnabled() { return remoteStoreIndexShallowCopy; } + public long getPinnedTimestamp() { + return pinnedTimestamp; + } + /** * Returns shard failures; an empty list will be returned if there were no shard * failures, or if {@link #state()} returns {@code null}. 
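Note: pinnedTimestamp defaults to 0L, is emitted in XContent only when non-zero (see the toXContent hunks below), and crosses the wire only between nodes on V_3_0_0 or later, so absent values always collapse back to 0. A minimal consumer-side sketch under these assumptions (snapshotInfo is a hypothetical variable, not part of this diff):
    long pinned = snapshotInfo.getPinnedTimestamp(); // 0L when the snapshot carries no pinned timestamp
    boolean isPinnedTimestampSnapshot = pinned != 0L; // mirrors the non-zero checks in toXContent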
@@ -606,6 +629,8 @@ public String toString() { + shardFailures + ", isRemoteStoreInteropEnabled=" + remoteStoreIndexShallowCopy + + ", pinnedTimestamp=" + + pinnedTimestamp + '}'; } @@ -641,6 +666,10 @@ public XContentBuilder toXContent(final XContentBuilder builder, final Params pa if (remoteStoreIndexShallowCopy != null) { builder.field(REMOTE_STORE_INDEX_SHALLOW_COPY, remoteStoreIndexShallowCopy); } + if (pinnedTimestamp != 0) { + builder.field(PINNED_TIMESTAMP, pinnedTimestamp); + } + builder.startArray(INDICES); for (String index : indices) { builder.value(index); @@ -699,6 +728,9 @@ private XContentBuilder toXContentInternal(final XContentBuilder builder, final if (remoteStoreIndexShallowCopy != null) { builder.field(REMOTE_STORE_INDEX_SHALLOW_COPY, remoteStoreIndexShallowCopy); } + if (pinnedTimestamp != 0) { + builder.field(PINNED_TIMESTAMP, pinnedTimestamp); + } builder.startArray(INDICES); for (String index : indices) { builder.value(index); @@ -747,6 +779,7 @@ public static SnapshotInfo fromXContentInternal(final XContentParser parser) thr long endTime = 0; int totalShards = 0; int successfulShards = 0; + long pinnedTimestamp = 0; Boolean includeGlobalState = null; Boolean remoteStoreIndexShallowCopy = null; Map userMetadata = null; @@ -788,6 +821,8 @@ public static SnapshotInfo fromXContentInternal(final XContentParser parser) thr includeGlobalState = parser.booleanValue(); } else if (REMOTE_STORE_INDEX_SHALLOW_COPY.equals(currentFieldName)) { remoteStoreIndexShallowCopy = parser.booleanValue(); + } else if (PINNED_TIMESTAMP.equals(currentFieldName)) { + pinnedTimestamp = parser.longValue(); } } else if (token == XContentParser.Token.START_ARRAY) { if (DATA_STREAMS.equals(currentFieldName)) { @@ -840,7 +875,8 @@ public static SnapshotInfo fromXContentInternal(final XContentParser parser) thr shardFailures, includeGlobalState, userMetadata, - remoteStoreIndexShallowCopy + remoteStoreIndexShallowCopy, + pinnedTimestamp ); } @@ -872,6 +908,9 @@ public void writeTo(final StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_2_9_0)) { out.writeOptionalBoolean(remoteStoreIndexShallowCopy); } + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + out.writeVLong(pinnedTimestamp); + } } private static SnapshotState snapshotState(final String reason, final List shardFailures) { @@ -904,7 +943,8 @@ public boolean equals(Object o) { && Objects.equals(version, that.version) && Objects.equals(shardFailures, that.shardFailures) && Objects.equals(userMetadata, that.userMetadata) - && Objects.equals(remoteStoreIndexShallowCopy, that.remoteStoreIndexShallowCopy); + && Objects.equals(remoteStoreIndexShallowCopy, that.remoteStoreIndexShallowCopy) + && Objects.equals(pinnedTimestamp, that.pinnedTimestamp); } @Override @@ -924,7 +964,8 @@ public int hashCode() { version, shardFailures, userMetadata, - remoteStoreIndexShallowCopy + remoteStoreIndexShallowCopy, + pinnedTimestamp ); } } diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java index 5e49208465dbb..b7fea116a12b7 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java @@ -136,6 +136,7 @@ import static org.opensearch.node.remotestore.RemoteStoreNodeService.CompatibilityMode; import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; import static 
org.opensearch.repositories.blobstore.BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY; +import static org.opensearch.repositories.blobstore.BlobStoreRepository.SHALLOW_SNAPSHOT_V2; import static org.opensearch.snapshots.SnapshotUtils.validateSnapshotsBackingAnyIndex; /** @@ -202,6 +203,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus Setting.Property.Dynamic ); + private static final String SNAPSHOT_PINNED_TIMESTAMP_DELIMITER = ":"; private volatile int maxConcurrentOperations; public SnapshotsService( @@ -251,10 +253,36 @@ public SnapshotsService( * @param listener snapshot completion listener */ public void executeSnapshot(final CreateSnapshotRequest request, final ActionListener listener) { - createSnapshot( - request, - ActionListener.wrap(snapshot -> addListener(snapshot, ActionListener.map(listener, Tuple::v2)), listener::onFailure) - ); + Repository repository = repositoriesService.repository(request.repository()); + + boolean isSnapshotV2 = SHALLOW_SNAPSHOT_V2.get(repository.getMetadata().settings()); + logger.debug("shallow_snapshot_v2 is set as [{}]", isSnapshotV2); + + boolean remoteStoreIndexShallowCopy = remoteStoreShallowCopyEnabled(repository); + if (remoteStoreIndexShallowCopy + && isSnapshotV2 + && request.indices().length == 0 + && clusterService.state().nodes().getMinNodeVersion().onOrAfter(Version.CURRENT)) { + createSnapshotV2(request, listener); + } else { + createSnapshot( + request, + ActionListener.wrap(snapshot -> addListener(snapshot, ActionListener.map(listener, Tuple::v2)), listener::onFailure) + ); + } + } + + private boolean remoteStoreShallowCopyEnabled(Repository repository) { + boolean remoteStoreIndexShallowCopy = REMOTE_STORE_INDEX_SHALLOW_COPY.get(repository.getMetadata().settings()); + logger.debug("remote_store_index_shallow_copy setting is set as [{}]", remoteStoreIndexShallowCopy); + if (remoteStoreIndexShallowCopy + && clusterService.getClusterSettings().get(REMOTE_STORE_COMPATIBILITY_MODE_SETTING).equals(CompatibilityMode.MIXED)) { + // don't allow shallow snapshots if compatibility mode is not strict + logger.warn("Shallow snapshots are not supported during migration. Falling back to full snapshot."); + remoteStoreIndexShallowCopy = false; + } + return remoteStoreIndexShallowCopy; + } /** @@ -262,6 +290,7 @@ public void executeSnapshot(final CreateSnapshotRequest request, final ActionLis *

* This method is used by clients to start a snapshot. It makes sure that no snapshots are currently running and creates a snapshot record in cluster state metadata. + *

* * @param request snapshot request + * @param listener snapshot creation listener @@ -287,27 +316,13 @@ public void createSnapshot(final CreateSnapshotRequest request, final ActionList @Override public ClusterState execute(ClusterState currentState) { - ensureSnapshotNameAvailableInRepo(repositoryData, snapshotName, repository); + createSnapshotPreValidations(currentState, repositoryData, repositoryName, snapshotName); final SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY); final List runningSnapshots = snapshots.entries(); - ensureSnapshotNameNotRunning(runningSnapshots, repositoryName, snapshotName); - validate(repositoryName, snapshotName, currentState); final SnapshotDeletionsInProgress deletionsInProgress = currentState.custom( SnapshotDeletionsInProgress.TYPE, SnapshotDeletionsInProgress.EMPTY ); - final RepositoryCleanupInProgress repositoryCleanupInProgress = currentState.custom( - RepositoryCleanupInProgress.TYPE, - RepositoryCleanupInProgress.EMPTY - ); - if (repositoryCleanupInProgress.hasCleanupInProgress()) { - throw new ConcurrentSnapshotExecutionException( - repositoryName, - snapshotName, - "cannot snapshot while a repository cleanup is in-progress in [" + repositoryCleanupInProgress + "]" - ); - } - ensureNoCleanupInProgress(currentState, repositoryName, snapshotName); ensureBelowConcurrencyLimit(repositoryName, snapshotName, snapshots, deletionsInProgress); // Store newSnapshot here to be processed in clusterStateProcessed List indices = Arrays.asList(indexNameExpressionResolver.concreteIndexNames(currentState, request)); @@ -407,6 +422,184 @@ public TimeValue timeout() { }, "create_snapshot [" + snapshotName + ']', listener::onFailure); } + /** + * Initializes the snapshotting process for clients when Snapshot v2 is enabled. This method is responsible for taking + * a shallow snapshot and pinning the snapshot timestamp. The entire process is executed on the cluster manager node. + * + * Unlike traditional snapshot operations, this method performs a synchronous snapshot execution and doesn't + * upload any shard metadata to the snapshot repository. + * The pinned timestamp is later reconciled with remote store segment and translog metadata files during the restore
+ * + * @param request snapshot request + * @param listener snapshot creation listener + */ + public void createSnapshotV2(final CreateSnapshotRequest request, final ActionListener listener) { + long pinnedTimestamp = System.currentTimeMillis(); + final String repositoryName = request.repository(); + final String snapshotName = indexNameExpressionResolver.resolveDateMathExpression(request.snapshot()); + validate(repositoryName, snapshotName); + + final SnapshotId snapshotId = new SnapshotId(snapshotName, UUIDs.randomBase64UUID()); // new UUID for the snapshot + Repository repository = repositoriesService.repository(repositoryName); + + if (repository.isReadOnly()) { + listener.onFailure( + new RepositoryException(repository.getMetadata().name(), "cannot create snapshot-v2 in a readonly repository") + ); + return; + } + + final Snapshot snapshot = new Snapshot(repositoryName, snapshotId); + ClusterState currentState = clusterService.state(); + final Map userMeta = repository.adaptUserMetadata(request.userMetadata()); + try { + final StepListener repositoryDataListener = new StepListener<>(); + repositoriesService.getRepositoryData(repositoryName, repositoryDataListener); + + repositoryDataListener.whenComplete(repositoryData -> { + createSnapshotPreValidations(currentState, repositoryData, repositoryName, snapshotName); + + List indices = new ArrayList<>(currentState.metadata().indices().keySet()); + + final List dataStreams = indexNameExpressionResolver.dataStreamNames( + currentState, + request.indicesOptions(), + request.indices() + ); + + logger.trace("[{}][{}] creating snapshot-v2 for indices [{}]", repositoryName, snapshotName, indices); + + final SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY); + final List runningSnapshots = snapshots.entries(); + + final List indexIds = repositoryData.resolveNewIndices( + indices, + getInFlightIndexIds(runningSnapshots, repositoryName) + ); + final Version version = minCompatibleVersion(currentState.nodes().getMinNodeVersion(), repositoryData, null); + final ShardGenerations shardGenerations = buildShardsGenerationFromRepositoryData( + currentState.metadata(), + currentState.routingTable(), + indexIds, + repositoryData + ); + + if (repositoryData.getGenId() == RepositoryData.UNKNOWN_REPO_GEN) { + logger.debug("[{}] was aborted before starting", snapshot); + throw new SnapshotException(snapshot, "Aborted on initialization"); + } + final SnapshotInfo snapshotInfo = new SnapshotInfo( + snapshot.getSnapshotId(), + shardGenerations.indices().stream().map(IndexId::getName).collect(Collectors.toList()), + dataStreams, + pinnedTimestamp, + null, + System.currentTimeMillis(), + shardGenerations.totalShards(), + Collections.emptyList(), + request.includeGlobalState(), + userMeta, + true, + pinnedTimestamp + ); + if (!clusterService.state().nodes().isLocalNodeElectedClusterManager()) { + throw new SnapshotException(repositoryName, snapshotName, "Aborting snapshot-v2, no longer cluster manager"); + } + final StepListener pinnedTimestampListener = new StepListener<>(); + pinnedTimestampListener.whenComplete(repoData -> { listener.onResponse(snapshotInfo); }, listener::onFailure); + repository.finalizeSnapshot( + shardGenerations, + repositoryData.getGenId(), + metadataForSnapshot(currentState.metadata(), request.includeGlobalState(), false, dataStreams, indexIds), + snapshotInfo, + version, + state -> state, + Priority.IMMEDIATE, + new ActionListener() { + @Override + public void onResponse(RepositoryData 
repositoryData) { + if (!clusterService.state().nodes().isLocalNodeElectedClusterManager()) { + failSnapshotCompletionListeners( + snapshot, + new SnapshotException(snapshot, "Aborting snapshot-v2, no longer cluster manager") + ); + listener.onFailure( + new SnapshotException(repositoryName, snapshotName, "Aborting snapshot-v2, no longer cluster manager") + ); + return; + } + updateSnapshotPinnedTimestamp(repositoryData, snapshot, pinnedTimestamp, pinnedTimestampListener); + } + + @Override + public void onFailure(Exception e) { + logger.error("Failed to upload files to snapshot repo {} for snapshot-v2 {} ", repositoryName, snapshotName); + listener.onFailure(e); + } + } + ); + + }, listener::onFailure); + } catch (Exception e) { + assert false : new AssertionError(e); + logger.error("Snapshot-v2 {} creation failed with exception {}", snapshot.getSnapshotId().getName(), e); + listener.onFailure(e); + } + } + + private void createSnapshotPreValidations( + ClusterState currentState, + RepositoryData repositoryData, + String repositoryName, + String snapshotName + ) { + Repository repository = repositoriesService.repository(repositoryName); + ensureSnapshotNameAvailableInRepo(repositoryData, snapshotName, repository); + final SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY); + final List runningSnapshots = snapshots.entries(); + ensureSnapshotNameNotRunning(runningSnapshots, repositoryName, snapshotName); + validate(repositoryName, snapshotName, currentState); + final RepositoryCleanupInProgress repositoryCleanupInProgress = currentState.custom( + RepositoryCleanupInProgress.TYPE, + RepositoryCleanupInProgress.EMPTY + ); + if (repositoryCleanupInProgress.hasCleanupInProgress()) { + throw new ConcurrentSnapshotExecutionException( + repositoryName, + snapshotName, + "cannot snapshot-v2 while a repository cleanup is in-progress in [" + repositoryCleanupInProgress + "]" + ); + } + ensureNoCleanupInProgress(currentState, repositoryName, snapshotName); + } + + private void updateSnapshotPinnedTimestamp( + RepositoryData repositoryData, + Snapshot snapshot, + long timestampToPin, + ActionListener listener + ) { + remoteStorePinnedTimestampService.pinTimestamp( + timestampToPin, + snapshot.getRepository() + SNAPSHOT_PINNED_TIMESTAMP_DELIMITER + snapshot.getSnapshotId().getUUID(), + new ActionListener() { + @Override + public void onResponse(Void unused) { + logger.debug("Timestamp pinned successfully for snapshot {}", snapshot.getSnapshotId().getName()); + listener.onResponse(repositoryData); + } + + @Override + public void onFailure(Exception e) { + logger.error("Failed to pin timestamp for snapshot {} with exception {}", snapshot.getSnapshotId().getName(), e); + listener.onFailure(e); + + } + } + ); + } + private static void ensureSnapshotNameNotRunning( List runningSnapshots, String repositoryName, @@ -903,15 +1096,21 @@ private static ShardGenerations buildGenerations(SnapshotsInProgress.Entry snaps return builder.build(); } - private static Metadata metadataForSnapshot(SnapshotsInProgress.Entry snapshot, Metadata metadata) { + private static Metadata metadataForSnapshot( + Metadata metadata, + boolean includeGlobalState, + boolean isPartial, + List dataStreamsList, + List indices + ) { final Metadata.Builder builder; - if (snapshot.includeGlobalState() == false) { + if (includeGlobalState == false) { // Remove global state from the cluster state builder = Metadata.builder(); - for (IndexId index : snapshot.indices()) { + for (IndexId index 
: indices) { final IndexMetadata indexMetadata = metadata.index(index.getName()); if (indexMetadata == null) { - assert snapshot.partial() : "Index [" + index + "] was deleted during a snapshot but snapshot was not partial."; + assert isPartial : "Index [" + index + "] was deleted during a snapshot but snapshot was not partial."; } else { builder.put(indexMetadata, false); } @@ -921,12 +1120,10 @@ private static Metadata metadataForSnapshot(SnapshotsInProgress.Entry snapshot, } // Only keep those data streams in the metadata that were actually requested by the initial snapshot create operation Map dataStreams = new HashMap<>(); - for (String dataStreamName : snapshot.dataStreams()) { + for (String dataStreamName : dataStreamsList) { DataStream dataStream = metadata.dataStreams().get(dataStreamName); if (dataStream == null) { - assert snapshot.partial() : "Data stream [" - + dataStreamName - + "] was deleted during a snapshot but snapshot was not partial."; + assert isPartial : "Data stream [" + dataStreamName + "] was deleted during a snapshot but snapshot was not partial."; } else { dataStreams.put(dataStreamName, dataStream); } @@ -1474,7 +1671,8 @@ private void finalizeSnapshotEntry(SnapshotsInProgress.Entry entry, Metadata met shardFailures, entry.includeGlobalState(), entry.userMetadata(), - entry.remoteStoreIndexShallowCopy() + entry.remoteStoreIndexShallowCopy(), + 0 ); final StepListener metadataListener = new StepListener<>(); final Repository repo = repositoriesService.repository(snapshot.getRepository()); @@ -1493,10 +1691,11 @@ private void finalizeSnapshotEntry(SnapshotsInProgress.Entry entry, Metadata met meta -> repo.finalizeSnapshot( shardGenerations, repositoryData.getGenId(), - metadataForSnapshot(entry, meta), + metadataForSnapshot(meta, entry.includeGlobalState(), entry.partial(), entry.dataStreams(), entry.indices()), snapshotInfo, entry.version(), state -> stateWithoutSnapshot(state, snapshot), + Priority.NORMAL, ActionListener.wrap(newRepoData -> { completeListenersIgnoringException(endAndGetListenersToResolve(snapshot), Tuple.tuple(newRepoData, snapshotInfo)); logger.info("snapshot [{}] completed with state [{}]", snapshot, snapshotInfo.state()); @@ -2673,6 +2872,42 @@ private static Map shards( return Collections.unmodifiableMap(builder); } + private static ShardGenerations buildShardsGenerationFromRepositoryData( + Metadata metadata, + RoutingTable routingTable, + List indices, + RepositoryData repositoryData + ) { + ShardGenerations.Builder builder = ShardGenerations.builder(); + final ShardGenerations shardGenerations = repositoryData.shardGenerations(); + + for (IndexId index : indices) { + final String indexName = index.getName(); + final boolean isNewIndex = repositoryData.getIndices().containsKey(indexName) == false; + IndexMetadata indexMetadata = metadata.index(indexName); + + final IndexRoutingTable indexRoutingTable = routingTable.index(indexName); + for (int i = 0; i < indexMetadata.getNumberOfShards(); i++) { + final ShardId shardId = indexRoutingTable.shard(i).shardId(); + final String shardRepoGeneration; + + if (isNewIndex) { + assert shardGenerations.getShardGen(index, shardId.getId()) == null : "Found shard generation for new index [" + + index + + "]"; + shardRepoGeneration = ShardGenerations.NEW_SHARD_GEN; + } else { + shardRepoGeneration = shardGenerations.getShardGen(index, shardId.id()); + } + builder.put(index, shardId.id(), shardRepoGeneration); + + } + + } + + return builder.build(); + } + /** * Returns the data streams that are currently 
being snapshotted (with partial == false) and that are contained in the * indices-to-check set. diff --git a/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec b/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec index e030a813373c1..f51452c57f975 100644 --- a/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec +++ b/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec @@ -1 +1 @@ -org.opensearch.index.codec.composite.Composite99Codec +org.opensearch.index.codec.composite.composite99.Composite99Codec diff --git a/server/src/test/java/org/opensearch/action/DynamicActionRegistryTests.java b/server/src/test/java/org/opensearch/action/DynamicActionRegistryTests.java index 20c2b1f17124c..1e2b29287acb3 100644 --- a/server/src/test/java/org/opensearch/action/DynamicActionRegistryTests.java +++ b/server/src/test/java/org/opensearch/action/DynamicActionRegistryTests.java @@ -20,6 +20,7 @@ import org.opensearch.extensions.action.ExtensionTransportAction; import org.opensearch.extensions.rest.RestSendToExtensionAction; import org.opensearch.rest.NamedRoute; +import org.opensearch.rest.RestHandler; import org.opensearch.rest.RestRequest; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskManager; @@ -85,13 +86,17 @@ public void testDynamicActionRegistryWithNamedRoutes() { RestSendToExtensionAction action2 = mock(RestSendToExtensionAction.class); NamedRoute r1 = new NamedRoute.Builder().method(RestRequest.Method.GET).path("/foo").uniqueName("foo").build(); NamedRoute r2 = new NamedRoute.Builder().method(RestRequest.Method.PUT).path("/bar").uniqueName("bar").build(); + RestHandler.Route r3 = new RestHandler.Route(RestRequest.Method.DELETE, "/foo"); DynamicActionRegistry registry = new DynamicActionRegistry(); registry.registerDynamicRoute(r1, action); registry.registerDynamicRoute(r2, action2); assertTrue(registry.isActionRegistered("foo")); + assertEquals(action, registry.get(r1)); assertTrue(registry.isActionRegistered("bar")); + assertEquals(action2, registry.get(r2)); + assertNull(registry.get(r3)); registry.unregisterDynamicRoute(r2); diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotResponseTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotResponseTests.java index 274a548fd98ab..2feb0d3ba9405 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotResponseTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotResponseTests.java @@ -95,7 +95,8 @@ protected CreateSnapshotResponse createTestInstance() { shardFailures, globalState, SnapshotInfoTests.randomUserMetadata(), - false + false, + 0 ) ); } diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsResponseTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsResponseTests.java index 3ef143e36dab9..58af390d194d3 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsResponseTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsResponseTests.java @@ -77,7 +77,8 @@ protected GetSnapshotsResponse createTestInstance() { shardFailures, randomBoolean(), SnapshotInfoTests.randomUserMetadata(), - false + false, + 0 ) ); } diff --git 
a/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java b/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java index e74962dcbba1b..3ee2278aec546 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java @@ -66,6 +66,8 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; +import static org.opensearch.common.util.FeatureFlags.REMOTE_PUBLICATION_EXPERIMENTAL; +import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; @@ -971,7 +973,7 @@ public void testHandlePrePublishAndCommitWhenRemoteStateEnabled() throws IOExcep .put("node.attr." + REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, randomRepoName) .put(stateRepoTypeAttributeKey, FsRepository.TYPE) .put(stateRepoSettingsAttributeKeyPrefix + "location", "randomRepoPath") - .put(RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true) + .put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true) .build(); final CoordinationState coordinationState = createCoordinationState(persistedStateRegistry, node1, settings); @@ -1002,6 +1004,16 @@ public void testHandlePrePublishAndCommitWhenRemoteStateEnabled() throws IOExcep ); } + public void testIsRemotePublicationEnabled_WithInconsistentSettings() { + // create settings with remote state disabled but publication enabled + Settings settings = Settings.builder() + .put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), false) + .put(REMOTE_PUBLICATION_EXPERIMENTAL, true) + .build(); + CoordinationState coordinationState = createCoordinationState(psr1, node1, settings); + assertFalse(coordinationState.isRemotePublicationEnabled()); + } + public static CoordinationState createCoordinationState( PersistedStateRegistry persistedStateRegistry, DiscoveryNode localNode, diff --git a/server/src/test/java/org/opensearch/index/codec/CodecTests.java b/server/src/test/java/org/opensearch/index/codec/CodecTests.java index 7146b7dc51753..bbf98b5c32918 100644 --- a/server/src/test/java/org/opensearch/index/codec/CodecTests.java +++ b/server/src/test/java/org/opensearch/index/codec/CodecTests.java @@ -48,7 +48,7 @@ import org.opensearch.env.Environment; import org.opensearch.index.IndexSettings; import org.opensearch.index.analysis.IndexAnalyzers; -import org.opensearch.index.codec.composite.Composite99Codec; +import org.opensearch.index.codec.composite.composite99.Composite99Codec; import org.opensearch.index.engine.EngineConfig; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.similarity.SimilarityService; diff --git a/server/src/test/java/org/opensearch/index/codec/composite/LuceneDocValuesConsumerFactoryTests.java b/server/src/test/java/org/opensearch/index/codec/composite/LuceneDocValuesConsumerFactoryTests.java new file mode 100644 index 0000000000000..7fb8fe7f68f45 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/codec/composite/LuceneDocValuesConsumerFactoryTests.java @@ -0,0 +1,85 @@ +/* + * 
SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec.composite; + +import org.apache.lucene.codecs.DocValuesConsumer; +import org.apache.lucene.codecs.lucene99.Lucene99Codec; +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.FieldInfos; +import org.apache.lucene.index.SegmentInfo; +import org.apache.lucene.index.SegmentWriteState; +import org.apache.lucene.store.Directory; +import org.apache.lucene.util.InfoStream; +import org.apache.lucene.util.Version; +import org.opensearch.index.codec.composite.composite99.Composite99Codec; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.HashMap; +import java.util.UUID; + +public class LuceneDocValuesConsumerFactoryTests extends OpenSearchTestCase { + + private Directory directory; + private final String dataCodec = "data_codec"; + private final String dataExtension = "data_extension"; + private final String metaCodec = "meta_codec"; + private final String metaExtension = "meta_extension"; + + @Before + public void setup() { + directory = newDirectory(); + } + + public void testGetDocValuesConsumerForCompositeCodec() throws IOException { + SegmentInfo segmentInfo = new SegmentInfo( + directory, + Version.LATEST, + Version.LUCENE_9_11_0, + "test_segment", + randomInt(), + false, + false, + new Lucene99Codec(), + new HashMap<>(), + UUID.randomUUID().toString().substring(0, 16).getBytes(StandardCharsets.UTF_8), + new HashMap<>(), + null + ); + SegmentWriteState state = new SegmentWriteState( + InfoStream.getDefault(), + segmentInfo.dir, + segmentInfo, + new FieldInfos(new FieldInfo[0]), + null, + newIOContext(random()) + ); + + DocValuesConsumer consumer = LuceneDocValuesConsumerFactory.getDocValuesConsumerForCompositeCodec( + state, + dataCodec, + dataExtension, + metaCodec, + metaExtension + ); + + assertEquals("org.apache.lucene.codecs.lucene90.Lucene90DocValuesConsumer", consumer.getClass().getName()); + assertEquals(CompositeCodecFactory.COMPOSITE_CODEC, Composite99Codec.COMPOSITE_INDEX_CODEC_NAME); + consumer.close(); + } + + @After + public void teardown() throws Exception { + super.tearDown(); + directory.close(); + } +} diff --git a/server/src/test/java/org/opensearch/index/codec/composite/LuceneDocValuesProducerFactoryTests.java b/server/src/test/java/org/opensearch/index/codec/composite/LuceneDocValuesProducerFactoryTests.java new file mode 100644 index 0000000000000..55d637dfb9cae --- /dev/null +++ b/server/src/test/java/org/opensearch/index/codec/composite/LuceneDocValuesProducerFactoryTests.java @@ -0,0 +1,124 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + + package org.opensearch.index.codec.composite; + + import org.apache.lucene.codecs.DocValuesConsumer; + import org.apache.lucene.codecs.DocValuesProducer; + import org.apache.lucene.codecs.lucene99.Lucene99Codec; + import org.apache.lucene.index.FieldInfo; + import org.apache.lucene.index.FieldInfos; + import org.apache.lucene.index.SegmentInfo; + import org.apache.lucene.index.SegmentReadState; + import org.apache.lucene.index.SegmentWriteState; + import org.apache.lucene.store.Directory; + import org.apache.lucene.util.InfoStream; + import org.apache.lucene.util.Version; + import org.opensearch.index.codec.composite.composite99.Composite99Codec; + import org.opensearch.test.OpenSearchTestCase; + import org.junit.After; + import org.junit.Before; + + import java.io.IOException; + import java.nio.charset.StandardCharsets; + import java.util.HashMap; + import java.util.UUID; + + import static org.mockito.Mockito.mock; + + public class LuceneDocValuesProducerFactoryTests extends OpenSearchTestCase { + + private Directory directory; + private final String dataCodec = "data_codec"; + private final String dataExtension = "data_extension"; + private final String metaCodec = "meta_codec"; + private final String metaExtension = "meta_extension"; + + @Before + public void setup() { + directory = newDirectory(); + } + + public void testGetDocValuesProducerForCompositeCodec99() throws IOException { + SegmentInfo segmentInfo = new SegmentInfo( + directory, + Version.LATEST, + Version.LUCENE_9_11_0, + "test_segment", + randomInt(), + false, + false, + new Lucene99Codec(), + new HashMap<>(), + UUID.randomUUID().toString().substring(0, 16).getBytes(StandardCharsets.UTF_8), + new HashMap<>(), + null + ); + + // open a consumer first in order for the producer to find the file + SegmentWriteState state = new SegmentWriteState( + InfoStream.getDefault(), + segmentInfo.dir, + segmentInfo, + new FieldInfos(new FieldInfo[0]), + null, + newIOContext(random()) + ); + DocValuesConsumer consumer = LuceneDocValuesConsumerFactory.getDocValuesConsumerForCompositeCodec( + state, + dataCodec, + dataExtension, + metaCodec, + metaExtension + ); + consumer.close(); + + SegmentReadState segmentReadState = new SegmentReadState( + segmentInfo.dir, + segmentInfo, + new FieldInfos(new FieldInfo[0]), + newIOContext(random()) + ); + DocValuesProducer producer = LuceneDocValuesProducerFactory.getDocValuesProducerForCompositeCodec( + Composite99Codec.COMPOSITE_INDEX_CODEC_NAME, + segmentReadState, + dataCodec, + dataExtension, + metaCodec, + metaExtension + ); + + assertNotNull(producer); + assertEquals("org.apache.lucene.codecs.lucene90.Lucene90DocValuesProducer", producer.getClass().getName()); + producer.close(); + } + + public void testGetDocValuesProducerForCompositeCodec_InvalidCodec() { + SegmentReadState mockSegmentReadState = mock(SegmentReadState.class); + + IllegalStateException exception = expectThrows(IllegalStateException.class, () -> { + LuceneDocValuesProducerFactory.getDocValuesProducerForCompositeCodec( + "invalid_codec", + mockSegmentReadState, + dataCodec, + dataExtension, + metaCodec, + metaExtension + ); + }); + + assertNotNull(exception); + assertTrue(exception.getMessage().contains("Invalid composite codec")); + } + + @After + public void teardown() throws Exception { + super.tearDown(); + directory.close(); + } +} diff --git a/server/src/test/java/org/opensearch/index/codec/composite/SortedNumericDocValuesWriterWrapperTests.java
b/server/src/test/java/org/opensearch/index/codec/composite/SortedNumericDocValuesWriterWrapperTests.java new file mode 100644 index 0000000000000..54eead20ef354 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/codec/composite/SortedNumericDocValuesWriterWrapperTests.java @@ -0,0 +1,94 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec.composite; + +import org.apache.lucene.index.DocValuesType; +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.index.SortedNumericDocValuesWriterWrapper; +import org.apache.lucene.index.VectorEncoding; +import org.apache.lucene.index.VectorSimilarityFunction; +import org.apache.lucene.util.Counter; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.Collections; + +public class SortedNumericDocValuesWriterWrapperTests extends OpenSearchTestCase { + + private SortedNumericDocValuesWriterWrapper wrapper; + private FieldInfo fieldInfo; + private Counter counter; + + @Override + public void setUp() throws Exception { + super.setUp(); + fieldInfo = new FieldInfo( + "field", + 1, + false, + false, + true, + IndexOptions.NONE, + DocValuesType.NONE, + -1, + Collections.emptyMap(), + 0, + 0, + 0, + 0, + VectorEncoding.FLOAT32, + VectorSimilarityFunction.EUCLIDEAN, + false, + false + ); + counter = Counter.newCounter(); + wrapper = new SortedNumericDocValuesWriterWrapper(fieldInfo, counter); + } + + public void testAddValue() throws IOException { + wrapper.addValue(0, 10); + wrapper.addValue(1, 20); + wrapper.addValue(2, 30); + + SortedNumericDocValues docValues = wrapper.getDocValues(); + assertNotNull(docValues); + + assertEquals(0, docValues.nextDoc()); + assertEquals(10, docValues.nextValue()); + assertEquals(1, docValues.nextDoc()); + assertEquals(20, docValues.nextValue()); + assertEquals(2, docValues.nextDoc()); + assertEquals(30, docValues.nextValue()); + } + + public void testGetDocValues() { + SortedNumericDocValues docValues = wrapper.getDocValues(); + assertNotNull(docValues); + } + + public void testMultipleValues() throws IOException { + wrapper.addValue(0, 10); + wrapper.addValue(0, 20); + wrapper.addValue(1, 30); + + SortedNumericDocValues docValues = wrapper.getDocValues(); + assertNotNull(docValues); + + assertEquals(0, docValues.nextDoc()); + assertEquals(10, docValues.nextValue()); + assertEquals(20, docValues.nextValue()); + assertThrows(IllegalStateException.class, docValues::nextValue); + + assertEquals(1, docValues.nextDoc()); + assertEquals(30, docValues.nextValue()); + assertThrows(IllegalStateException.class, docValues::nextValue); + } +} diff --git a/server/src/test/java/org/opensearch/index/codec/composite/datacube/startree/StarTreeDocValuesFormatTests.java b/server/src/test/java/org/opensearch/index/codec/composite/datacube/startree/StarTreeDocValuesFormatTests.java index 6fa88215cad48..54a9cc035d7a9 100644 --- a/server/src/test/java/org/opensearch/index/codec/composite/datacube/startree/StarTreeDocValuesFormatTests.java +++ b/server/src/test/java/org/opensearch/index/codec/composite/datacube/startree/StarTreeDocValuesFormatTests.java @@ -29,7 +29,7 @@ import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentBuilder; 
import org.opensearch.index.MapperTestUtils; -import org.opensearch.index.codec.composite.Composite99Codec; +import org.opensearch.index.codec.composite.composite99.Composite99Codec; import org.opensearch.index.mapper.MapperService; import org.opensearch.indices.IndicesModule; import org.junit.After; diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/AbstractValueAggregatorTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/AbstractValueAggregatorTests.java index f5d3e197aa287..36f75834abba8 100644 --- a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/AbstractValueAggregatorTests.java +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/AbstractValueAggregatorTests.java @@ -48,11 +48,7 @@ public void testGetInitialAggregatedValueForSegmentDocNullValue() { } public void testMergeAggregatedNullValueAndSegmentNullValue() { - if (aggregator instanceof CountValueAggregator) { - assertThrows(AssertionError.class, () -> aggregator.mergeAggregatedValueAndSegmentValue(null, null)); - } else { - assertEquals(aggregator.getIdentityMetricValue(), aggregator.mergeAggregatedValueAndSegmentValue(null, null)); - } + assertEquals(aggregator.getIdentityMetricValue(), aggregator.mergeAggregatedValueAndSegmentValue(null, null)); } public void testMergeAggregatedNullValues() { @@ -65,13 +61,6 @@ public void testGetInitialAggregatedNullValue() { public void testGetInitialAggregatedValueForSegmentDocValue() { long randomLong = randomLong(); - if (aggregator instanceof CountValueAggregator) { - assertEquals(CountValueAggregator.DEFAULT_INITIAL_VALUE, aggregator.getInitialAggregatedValueForSegmentDocValue(randomLong())); - } else { - assertEquals( - starTreeNumericType.getDoubleValue(randomLong), - aggregator.getInitialAggregatedValueForSegmentDocValue(randomLong) - ); - } + assertEquals(starTreeNumericType.getDoubleValue(randomLong), aggregator.getInitialAggregatedValueForSegmentDocValue(randomLong)); } } diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/CountValueAggregatorTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/CountValueAggregatorTests.java index 550a4fea1174a..b270c1b1bc26c 100644 --- a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/CountValueAggregatorTests.java +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/CountValueAggregatorTests.java @@ -31,6 +31,11 @@ public void testMergeAggregatedValues() { assertEquals(randomLong2, aggregator.mergeAggregatedValues(null, randomLong2), 0.0); } + @Override + public void testMergeAggregatedNullValueAndSegmentNullValue() { + assertThrows(AssertionError.class, () -> aggregator.mergeAggregatedValueAndSegmentValue(null, null)); + } + public void testGetInitialAggregatedValue() { long randomLong = randomLong(); assertEquals(randomLong, aggregator.getInitialAggregatedValue(randomLong), 0.0); @@ -48,8 +53,13 @@ public void testIdentityMetricValue() { @Override public ValueAggregator getValueAggregator(StarTreeNumericType starTreeNumericType) { - aggregator = new CountValueAggregator(starTreeNumericType); + aggregator = new CountValueAggregator(); return aggregator; } + @Override + public void testGetInitialAggregatedValueForSegmentDocValue() { + long randomLong = randomLong(); + 
assertEquals(CountValueAggregator.DEFAULT_INITIAL_VALUE, (long) aggregator.getInitialAggregatedValueForSegmentDocValue(randomLong)); + } } diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/DocCountAggregatorTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/DocCountAggregatorTests.java new file mode 100644 index 0000000000000..2765629aa5950 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/DocCountAggregatorTests.java @@ -0,0 +1,75 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.compositeindex.datacube.startree.aggregators; + +import org.opensearch.index.compositeindex.datacube.startree.aggregators.numerictype.StarTreeNumericType; + +/** + * Unit tests for {@link DocCountAggregator}. + */ +public class DocCountAggregatorTests extends AbstractValueAggregatorTests { + + private DocCountAggregator aggregator; + + public DocCountAggregatorTests(StarTreeNumericType starTreeNumericType) { + super(starTreeNumericType); + } + + public void testMergeAggregatedValueAndSegmentValue() { + long randomLong = randomLong(); + assertEquals(randomLong + 3L, (long) aggregator.mergeAggregatedValueAndSegmentValue(randomLong, 3L)); + } + + public void testMergeAggregatedValues() { + long randomLong1 = randomLong(); + long randomLong2 = randomLong(); + assertEquals(randomLong1 + randomLong2, (long) aggregator.mergeAggregatedValues(randomLong1, randomLong2)); + assertEquals(randomLong1 + 1L, (long) aggregator.mergeAggregatedValues(randomLong1, null)); + assertEquals(randomLong2 + 1L, (long) aggregator.mergeAggregatedValues(null, randomLong2)); + } + + @Override + public void testMergeAggregatedNullValueAndSegmentNullValue() { + assertThrows(AssertionError.class, () -> aggregator.mergeAggregatedValueAndSegmentValue(null, null)); + } + + @Override + public void testMergeAggregatedNullValues() { + assertEquals( + (aggregator.getIdentityMetricValue() + aggregator.getIdentityMetricValue()), + (long) aggregator.mergeAggregatedValues(null, null) + ); + } + + public void testGetInitialAggregatedValue() { + long randomLong = randomLong(); + assertEquals(randomLong, (long) aggregator.getInitialAggregatedValue(randomLong)); + } + + public void testToStarTreeNumericTypeValue() { + long randomLong = randomLong(); + assertEquals(randomLong, (long) aggregator.toAggregatedValueType(randomLong)); + } + + public void testIdentityMetricValue() { + assertEquals(1L, (long) aggregator.getIdentityMetricValue()); + } + + @Override + public ValueAggregator getValueAggregator(StarTreeNumericType starTreeNumericType) { + aggregator = new DocCountAggregator(); + return aggregator; + } + + @Override + public void testGetInitialAggregatedValueForSegmentDocValue() { + long randomLong = randomLong(); + assertEquals(randomLong, (long) aggregator.getInitialAggregatedValueForSegmentDocValue(randomLong)); + } +} diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/AbstractStarTreeBuilderTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/AbstractStarTreeBuilderTests.java index 389b6cb34f085..e77f184ac0243 100644 --- 
a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/AbstractStarTreeBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/AbstractStarTreeBuilderTests.java @@ -55,6 +55,7 @@ import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; +import java.util.LinkedHashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; @@ -94,7 +95,8 @@ public void setup() throws IOException { new Metric("field4", List.of(MetricStat.SUM)), new Metric("field6", List.of(MetricStat.VALUE_COUNT)), new Metric("field9", List.of(MetricStat.MIN)), - new Metric("field10", List.of(MetricStat.MAX)) + new Metric("field10", List.of(MetricStat.MAX)), + new Metric("_doc_count", List.of(MetricStat.DOC_COUNT)) ); DocValuesProducer docValuesProducer = mock(DocValuesProducer.class); @@ -187,11 +189,26 @@ public void test_sortAndAggregateStarTreeDocuments() throws IOException { int noOfStarTreeDocuments = 5; StarTreeDocument[] starTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; - starTreeDocuments[0] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Double[] { 12.0, 10.0, randomDouble(), 8.0, 20.0 }); - starTreeDocuments[1] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 10.0, 6.0, randomDouble(), 12.0, 10.0 }); - starTreeDocuments[2] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 14.0, 12.0, randomDouble(), 6.0, 24.0 }); - starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Double[] { 9.0, 4.0, randomDouble(), 9.0, 12.0 }); - starTreeDocuments[4] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 11.0, 16.0, randomDouble(), 8.0, 13.0 }); + starTreeDocuments[0] = new StarTreeDocument( + new Long[] { 2L, 4L, 3L, 4L }, + new Object[] { 12.0, 10.0, randomDouble(), 8.0, 20.0, 10L } + ); + starTreeDocuments[1] = new StarTreeDocument( + new Long[] { 3L, 4L, 2L, 1L }, + new Object[] { 10.0, 6.0, randomDouble(), 12.0, 10.0, 10L } + ); + starTreeDocuments[2] = new StarTreeDocument( + new Long[] { 3L, 4L, 2L, 1L }, + new Object[] { 14.0, 12.0, randomDouble(), 6.0, 24.0, 10L } + ); + starTreeDocuments[3] = new StarTreeDocument( + new Long[] { 2L, 4L, 3L, 4L }, + new Object[] { 9.0, 4.0, randomDouble(), 9.0, 12.0, null } + ); + starTreeDocuments[4] = new StarTreeDocument( + new Long[] { 3L, 4L, 2L, 1L }, + new Object[] { 11.0, 16.0, randomDouble(), 8.0, 13.0, null } + ); StarTreeDocument[] segmentStarTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; for (int i = 0; i < noOfStarTreeDocuments; i++) { @@ -200,14 +217,15 @@ public void test_sortAndAggregateStarTreeDocuments() throws IOException { long metric3 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[2]); long metric4 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[3]); long metric5 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[4]); + Long metric6 = (Long) starTreeDocuments[i].metrics[5]; segmentStarTreeDocuments[i] = new StarTreeDocument( starTreeDocuments[i].dimensions, - new Long[] { metric1, metric2, metric3, metric4, metric5 } + new Long[] { metric1, metric2, metric3, metric4, metric5, metric6 } ); } List inorderStarTreeDocuments = List.of( - new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Object[] { 21.0, 14.0, 2L, 8.0, 20.0 }), - new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Object[] { 35.0, 34.0, 3L, 6.0, 24.0 }) + new StarTreeDocument(new 
Long[] { 2L, 4L, 3L, 4L }, new Object[] { 21.0, 14.0, 2L, 8.0, 20.0, 11L }), + new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Object[] { 35.0, 34.0, 3L, 6.0, 24.0, 21L }) ); Iterator expectedStarTreeDocumentIterator = inorderStarTreeDocuments.iterator(); @@ -233,6 +251,7 @@ public void test_sortAndAggregateStarTreeDocuments() throws IOException { assertEquals(expectedStarTreeDocument.metrics[2], resultStarTreeDocument.metrics[2]); assertEquals(expectedStarTreeDocument.metrics[3], resultStarTreeDocument.metrics[3]); assertEquals(expectedStarTreeDocument.metrics[4], resultStarTreeDocument.metrics[4]); + assertEquals(expectedStarTreeDocument.metrics[5], resultStarTreeDocument.metrics[5]); numOfAggregatedDocuments++; } @@ -280,15 +299,15 @@ public void test_sortAndAggregateStarTreeDocuments_nullMetric() throws IOExcepti int noOfStarTreeDocuments = 5; StarTreeDocument[] starTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; - starTreeDocuments[0] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Double[] { 12.0, 10.0, randomDouble(), 8.0, 20.0 }); - starTreeDocuments[1] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 10.0, 6.0, randomDouble(), 12.0, 10.0 }); - starTreeDocuments[2] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 14.0, 12.0, randomDouble(), 6.0, 24.0 }); - starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Double[] { 9.0, 4.0, randomDouble(), 9.0, 12.0 }); - starTreeDocuments[4] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 11.0, null, randomDouble(), 8.0, 13.0 }); + starTreeDocuments[0] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Object[] { 12.0, 10.0, randomDouble(), 8.0, 20.0 }); + starTreeDocuments[1] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Object[] { 10.0, 6.0, randomDouble(), 12.0, 10.0 }); + starTreeDocuments[2] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Object[] { 14.0, 12.0, randomDouble(), 6.0, 24.0 }); + starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Object[] { 9.0, 4.0, randomDouble(), 9.0, 12.0 }); + starTreeDocuments[4] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Object[] { 11.0, null, randomDouble(), 8.0, 13.0 }); List inorderStarTreeDocuments = List.of( - new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Object[] { 21.0, 14.0, 2L, 8.0, 20.0 }), - new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Object[] { 35.0, 18.0, 3L, 6.0, 24.0 }) + new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Object[] { 21.0, 14.0, 2L, 8.0, 20.0, 2L }), + new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Object[] { 35.0, 18.0, 3L, 6.0, 24.0, 3L }) ); Iterator expectedStarTreeDocumentIterator = inorderStarTreeDocuments.iterator(); @@ -303,7 +322,7 @@ public void test_sortAndAggregateStarTreeDocuments_nullMetric() throws IOExcepti long metric5 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[4]); segmentStarTreeDocuments[i] = new StarTreeDocument( starTreeDocuments[i].dimensions, - new Object[] { metric1, metric2, metric3, metric4, metric5 } + new Object[] { metric1, metric2, metric3, metric4, metric5, null } ); } SequentialDocValuesIterator[] dimsIterators = getDimensionIterators(segmentStarTreeDocuments); @@ -326,6 +345,7 @@ public void test_sortAndAggregateStarTreeDocuments_nullMetric() throws IOExcepti assertEquals(expectedStarTreeDocument.metrics[2], resultStarTreeDocument.metrics[2]); 
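The expected doc-count metrics in these assertions (11L and 21L above, or 2L and 3L for the all-null groups) follow from the DocCountAggregator semantics exercised earlier: a document whose _doc_count is null contributes the identity value 1. A tiny illustrative sketch of that merge rule, derived from the assertions in DocCountAggregatorTests (DocCountMergeSketch and merge are stand-in names, not part of the change):

// Doc-count merge semantics implied by DocCountAggregatorTests:
// a null _doc_count behaves as the identity value 1.
final class DocCountMergeSketch {
    static final long IDENTITY = 1L; // matches getIdentityMetricValue() in the tests

    static long merge(Long a, Long b) {
        return (a == null ? IDENTITY : a) + (b == null ? IDENTITY : b);
    }

    public static void main(String[] args) {
        System.out.println(merge(10L, null));              // 11 -> group {10, null}
        System.out.println(merge(merge(10L, 10L), null));  // 21 -> group {10, 10, null}
        System.out.println(merge(merge(null, null), null)); // 3 -> group {null, null, null}
    }
}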
assertEquals(expectedStarTreeDocument.metrics[3], resultStarTreeDocument.metrics[3]); assertEquals(expectedStarTreeDocument.metrics[4], resultStarTreeDocument.metrics[4]); + assertEquals(expectedStarTreeDocument.metrics[5], resultStarTreeDocument.metrics[5]); } } @@ -334,15 +354,30 @@ public void test_sortAndAggregateStarTreeDocuments_nullMetricField() throws IOEx int noOfStarTreeDocuments = 5; StarTreeDocument[] starTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; // Setting second metric iterator as empty sorted numeric , indicating a metric field is null - starTreeDocuments[0] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Double[] { 12.0, null, randomDouble(), 8.0, 20.0 }); - starTreeDocuments[1] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 10.0, null, randomDouble(), 12.0, 10.0 }); - starTreeDocuments[2] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 14.0, null, randomDouble(), 6.0, 24.0 }); - starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Double[] { 9.0, null, randomDouble(), 9.0, 12.0 }); - starTreeDocuments[4] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 11.0, null, randomDouble(), 8.0, 13.0 }); + starTreeDocuments[0] = new StarTreeDocument( + new Long[] { 2L, 4L, 3L, 4L }, + new Object[] { 12.0, null, randomDouble(), 8.0, 20.0, null } + ); + starTreeDocuments[1] = new StarTreeDocument( + new Long[] { 3L, 4L, 2L, 1L }, + new Object[] { 10.0, null, randomDouble(), 12.0, 10.0, null } + ); + starTreeDocuments[2] = new StarTreeDocument( + new Long[] { 3L, 4L, 2L, 1L }, + new Object[] { 14.0, null, randomDouble(), 6.0, 24.0, null } + ); + starTreeDocuments[3] = new StarTreeDocument( + new Long[] { 2L, 4L, 3L, 4L }, + new Object[] { 9.0, null, randomDouble(), 9.0, 12.0, 10L } + ); + starTreeDocuments[4] = new StarTreeDocument( + new Long[] { 3L, 4L, 2L, 1L }, + new Object[] { 11.0, null, randomDouble(), 8.0, 13.0, null } + ); List inorderStarTreeDocuments = List.of( - new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Object[] { 21.0, 0.0, 2L, 8.0, 20.0 }), - new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Object[] { 35.0, 0.0, 3L, 6.0, 24.0 }) + new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Object[] { 21.0, 0.0, 2L, 8.0, 20.0, 11L }), + new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Object[] { 35.0, 0.0, 3L, 6.0, 24.0, 3L }) ); Iterator expectedStarTreeDocumentIterator = inorderStarTreeDocuments.iterator(); @@ -355,9 +390,10 @@ public void test_sortAndAggregateStarTreeDocuments_nullMetricField() throws IOEx long metric3 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[2]); long metric4 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[3]); long metric5 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[4]); + Long metric6 = starTreeDocuments[i].metrics[5] != null ? 
(Long) starTreeDocuments[i].metrics[5] : null; segmentStarTreeDocuments[i] = new StarTreeDocument( starTreeDocuments[i].dimensions, - new Object[] { metric1, metric2, metric3, metric4, metric5 } + new Object[] { metric1, metric2, metric3, metric4, metric5, metric6 } ); } SequentialDocValuesIterator[] dimsIterators = getDimensionIterators(segmentStarTreeDocuments); @@ -380,6 +416,7 @@ public void test_sortAndAggregateStarTreeDocuments_nullMetricField() throws IOEx assertEquals(expectedStarTreeDocument.metrics[2], resultStarTreeDocument.metrics[2]); assertEquals(expectedStarTreeDocument.metrics[3], resultStarTreeDocument.metrics[3]); assertEquals(expectedStarTreeDocument.metrics[4], resultStarTreeDocument.metrics[4]); + assertEquals(expectedStarTreeDocument.metrics[5], resultStarTreeDocument.metrics[5]); } } @@ -390,18 +427,18 @@ public void test_sortAndAggregateStarTreeDocuments_nullAndMinusOneInDimensionFie // Setting second metric iterator as empty sorted numeric , indicating a metric field is null starTreeDocuments[0] = new StarTreeDocument( new Long[] { 2L, null, 3L, 4L }, - new Double[] { 12.0, null, randomDouble(), 8.0, 20.0 } + new Object[] { 12.0, null, randomDouble(), 8.0, 20.0 } ); starTreeDocuments[1] = new StarTreeDocument( new Long[] { null, 4L, 2L, 1L }, - new Double[] { 10.0, null, randomDouble(), 12.0, 10.0 } + new Object[] { 10.0, null, randomDouble(), 12.0, 10.0 } ); starTreeDocuments[2] = new StarTreeDocument( new Long[] { null, 4L, 2L, 1L }, - new Double[] { 14.0, null, randomDouble(), 6.0, 24.0 } + new Object[] { 14.0, null, randomDouble(), 6.0, 24.0 } ); - starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, null, 3L, 4L }, new Double[] { 9.0, null, randomDouble(), 9.0, 12.0 }); - starTreeDocuments[4] = new StarTreeDocument(new Long[] { -1L, 4L, 2L, 1L }, new Double[] { 11.0, null, randomDouble(), 8.0, 13.0 }); + starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, null, 3L, 4L }, new Object[] { 9.0, null, randomDouble(), 9.0, 12.0 }); + starTreeDocuments[4] = new StarTreeDocument(new Long[] { -1L, 4L, 2L, 1L }, new Object[] { 11.0, null, randomDouble(), 8.0, 13.0 }); List inorderStarTreeDocuments = List.of( new StarTreeDocument(new Long[] { 2L, null, 3L, 4L }, new Object[] { 21.0, 0.0, 2L }), @@ -443,6 +480,7 @@ public void test_sortAndAggregateStarTreeDocuments_nullAndMinusOneInDimensionFie assertEquals(expectedStarTreeDocument.metrics[2], resultStarTreeDocument.metrics[2]); assertEquals(expectedStarTreeDocument.metrics[3], resultStarTreeDocument.metrics[3]); assertEquals(expectedStarTreeDocument.metrics[4], resultStarTreeDocument.metrics[4]); + assertEquals(expectedStarTreeDocument.metrics[5], resultStarTreeDocument.metrics[5]); } builder.build(segmentStarTreeDocumentIterator); validateStarTree(builder.getRootNode(), 4, 1, builder.getStarTreeDocuments()); @@ -452,14 +490,29 @@ public void test_sortAndAggregateStarTreeDocuments_nullDimensionsAndNullMetrics( int noOfStarTreeDocuments = 5; StarTreeDocument[] starTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; // Setting second metric iterator as empty sorted numeric , indicating a metric field is null - starTreeDocuments[0] = new StarTreeDocument(new Long[] { null, null, null, null }, new Double[] { null, null, null, null, null }); - starTreeDocuments[1] = new StarTreeDocument(new Long[] { null, null, null, null }, new Double[] { null, null, null, null, null }); - starTreeDocuments[2] = new StarTreeDocument(new Long[] { null, null, null, null }, new Double[] { null, null, null, null, null 
}); - starTreeDocuments[3] = new StarTreeDocument(new Long[] { null, null, null, null }, new Double[] { null, null, null, null, null }); - starTreeDocuments[4] = new StarTreeDocument(new Long[] { null, null, null, null }, new Double[] { null, null, null, null, null }); + starTreeDocuments[0] = new StarTreeDocument( + new Long[] { null, null, null, null }, + new Object[] { null, null, null, null, null, null } + ); + starTreeDocuments[1] = new StarTreeDocument( + new Long[] { null, null, null, null }, + new Object[] { null, null, null, null, null, null } + ); + starTreeDocuments[2] = new StarTreeDocument( + new Long[] { null, null, null, null }, + new Object[] { null, null, null, null, null, null } + ); + starTreeDocuments[3] = new StarTreeDocument( + new Long[] { null, null, null, null }, + new Object[] { null, null, null, null, null, null } + ); + starTreeDocuments[4] = new StarTreeDocument( + new Long[] { null, null, null, null }, + new Object[] { null, null, null, null, null, null } + ); List inorderStarTreeDocuments = List.of( - new StarTreeDocument(new Long[] { null, null, null, null }, new Object[] { 0.0, 0.0, 0L, null, null }) + new StarTreeDocument(new Long[] { null, null, null, null }, new Object[] { 0.0, 0.0, 0L, null, null, 5L }) ); Iterator expectedStarTreeDocumentIterator = inorderStarTreeDocuments.iterator(); @@ -482,7 +535,7 @@ public void test_sortAndAggregateStarTreeDocuments_nullDimensionsAndNullMetrics( : null; segmentStarTreeDocuments[i] = new StarTreeDocument( starTreeDocuments[i].dimensions, - new Object[] { metric1, metric2, metric3, metric4, metric5 } + new Object[] { metric1, metric2, metric3, metric4, metric5, null } ); } SequentialDocValuesIterator[] dimsIterators = getDimensionIterators(segmentStarTreeDocuments); @@ -505,6 +558,7 @@ public void test_sortAndAggregateStarTreeDocuments_nullDimensionsAndNullMetrics( assertEquals(expectedStarTreeDocument.metrics[2], resultStarTreeDocument.metrics[2]); assertEquals(expectedStarTreeDocument.metrics[3], resultStarTreeDocument.metrics[3]); assertEquals(expectedStarTreeDocument.metrics[4], resultStarTreeDocument.metrics[4]); + assertEquals(expectedStarTreeDocument.metrics[5], resultStarTreeDocument.metrics[5]); } builder.build(segmentStarTreeDocumentIterator); validateStarTree(builder.getRootNode(), 4, 1, builder.getStarTreeDocuments()); @@ -521,21 +575,21 @@ public void test_sortAndAggregateStarTreeDocuments_nullDimensionsAndFewNullMetri // Setting second metric iterator as empty sorted numeric , indicating a metric field is null starTreeDocuments[0] = new StarTreeDocument( new Long[] { null, null, null, null }, - new Double[] { null, null, randomDouble(), null, maxValue } + new Object[] { null, null, randomDouble(), null, maxValue } ); - starTreeDocuments[1] = new StarTreeDocument(new Long[] { null, null, null, null }, new Double[] { null, null, null, null, null }); + starTreeDocuments[1] = new StarTreeDocument(new Long[] { null, null, null, null }, new Object[] { null, null, null, null, null }); starTreeDocuments[2] = new StarTreeDocument( new Long[] { null, null, null, null }, - new Double[] { null, null, null, minValue, null } + new Object[] { null, null, null, minValue, null } ); - starTreeDocuments[3] = new StarTreeDocument(new Long[] { null, null, null, null }, new Double[] { null, null, null, null, null }); + starTreeDocuments[3] = new StarTreeDocument(new Long[] { null, null, null, null }, new Object[] { null, null, null, null, null }); starTreeDocuments[4] = new StarTreeDocument( new Long[] { null, null, null, 
@@ -521,21 +575,21 @@ public void test_sortAndAggregateStarTreeDocuments_nullDimensionsAndFewNullMetri
         // Setting second metric iterator as empty sorted numeric , indicating a metric field is null
         starTreeDocuments[0] = new StarTreeDocument(
             new Long[] { null, null, null, null },
-            new Double[] { null, null, randomDouble(), null, maxValue }
+            new Object[] { null, null, randomDouble(), null, maxValue }
         );
-        starTreeDocuments[1] = new StarTreeDocument(new Long[] { null, null, null, null }, new Double[] { null, null, null, null, null });
+        starTreeDocuments[1] = new StarTreeDocument(new Long[] { null, null, null, null }, new Object[] { null, null, null, null, null });
         starTreeDocuments[2] = new StarTreeDocument(
             new Long[] { null, null, null, null },
-            new Double[] { null, null, null, minValue, null }
+            new Object[] { null, null, null, minValue, null }
         );
-        starTreeDocuments[3] = new StarTreeDocument(new Long[] { null, null, null, null }, new Double[] { null, null, null, null, null });
+        starTreeDocuments[3] = new StarTreeDocument(new Long[] { null, null, null, null }, new Object[] { null, null, null, null, null });
         starTreeDocuments[4] = new StarTreeDocument(
             new Long[] { null, null, null, null },
-            new Double[] { sumValue, null, randomDouble(), null, null }
+            new Object[] { sumValue, null, randomDouble(), null, null }
         );
         List<StarTreeDocument> inorderStarTreeDocuments = List.of(
-            new StarTreeDocument(new Long[] { null, null, null, null }, new Object[] { sumValue, 0.0, 2L, minValue, maxValue })
+            new StarTreeDocument(new Long[] { null, null, null, null }, new Object[] { sumValue, 0.0, 2L, minValue, maxValue, 5L })
         );
         Iterator<StarTreeDocument> expectedStarTreeDocumentIterator = inorderStarTreeDocuments.iterator();
@@ -558,7 +612,7 @@ public void test_sortAndAggregateStarTreeDocuments_nullDimensionsAndFewNullMetri
                 : null;
             segmentStarTreeDocuments[i] = new StarTreeDocument(
                 starTreeDocuments[i].dimensions,
-                new Object[] { metric1, metric2, metric3, metric4, metric5 }
+                new Object[] { metric1, metric2, metric3, metric4, metric5, null }
             );
         }
         SequentialDocValuesIterator[] dimsIterators = getDimensionIterators(segmentStarTreeDocuments);
@@ -581,6 +635,7 @@ public void test_sortAndAggregateStarTreeDocuments_nullDimensionsAndFewNullMetri
             assertEquals(expectedStarTreeDocument.metrics[2], resultStarTreeDocument.metrics[2]);
             assertEquals(expectedStarTreeDocument.metrics[3], resultStarTreeDocument.metrics[3]);
             assertEquals(expectedStarTreeDocument.metrics[4], resultStarTreeDocument.metrics[4]);
+            assertEquals(expectedStarTreeDocument.metrics[5], resultStarTreeDocument.metrics[5]);
         }
         builder.build(segmentStarTreeDocumentIterator);
         validateStarTree(builder.getRootNode(), 4, 1, builder.getStarTreeDocuments());
@@ -593,27 +648,27 @@ public void test_sortAndAggregateStarTreeDocuments_emptyDimensions() throws IOEx
         // Setting second metric iterator as empty sorted numeric , indicating a metric field is null
         starTreeDocuments[0] = new StarTreeDocument(
             new Long[] { null, null, null, null },
-            new Double[] { 12.0, null, randomDouble(), 8.0, 20.0 }
+            new Object[] { 12.0, null, randomDouble(), 8.0, 20.0, 10L }
         );
         starTreeDocuments[1] = new StarTreeDocument(
             new Long[] { null, null, null, null },
-            new Double[] { 10.0, null, randomDouble(), 12.0, 10.0 }
+            new Object[] { 10.0, null, randomDouble(), 12.0, 10.0, 10L }
         );
         starTreeDocuments[2] = new StarTreeDocument(
             new Long[] { null, null, null, null },
-            new Double[] { 14.0, null, randomDouble(), 6.0, 24.0 }
+            new Object[] { 14.0, null, randomDouble(), 6.0, 24.0, 10L }
         );
         starTreeDocuments[3] = new StarTreeDocument(
             new Long[] { null, null, null, null },
-            new Double[] { 9.0, null, randomDouble(), 9.0, 12.0 }
+            new Object[] { 9.0, null, randomDouble(), 9.0, 12.0, 10L }
         );
         starTreeDocuments[4] = new StarTreeDocument(
             new Long[] { null, null, null, null },
-            new Double[] { 11.0, null, randomDouble(), 8.0, 13.0 }
+            new Object[] { 11.0, null, randomDouble(), 8.0, 13.0, 10L }
         );
         List<StarTreeDocument> inorderStarTreeDocuments = List.of(
-            new StarTreeDocument(new Long[] { null, null, null, null }, new Object[] { 56.0, 0.0, 5L, 6.0, 24.0 })
+            new StarTreeDocument(new Long[] { null, null, null, null }, new Object[] { 56.0, 0.0, 5L, 6.0, 24.0, 50L })
        );
         Iterator<StarTreeDocument> expectedStarTreeDocumentIterator = inorderStarTreeDocuments.iterator();
@@ -626,9 +681,10 @@ public void test_sortAndAggregateStarTreeDocuments_emptyDimensions() throws IOEx
             Long metric3 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[2]);
             Long metric4 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[3]);
             Long metric5 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[4]);
+            Long metric6 = (Long) starTreeDocuments[i].metrics[5];
             segmentStarTreeDocuments[i] = new StarTreeDocument(
                 starTreeDocuments[i].dimensions,
-                new Object[] { metric1, metric2, metric3, metric4, metric5 }
+                new Object[] { metric1, metric2, metric3, metric4, metric5, metric6 }
             );
         }
         SequentialDocValuesIterator[] dimsIterators = getDimensionIterators(segmentStarTreeDocuments);
@@ -651,6 +707,7 @@ public void test_sortAndAggregateStarTreeDocuments_emptyDimensions() throws IOEx
             assertEquals(expectedStarTreeDocument.metrics[2], resultStarTreeDocument.metrics[2]);
             assertEquals(expectedStarTreeDocument.metrics[3], resultStarTreeDocument.metrics[3]);
             assertEquals(expectedStarTreeDocument.metrics[4], resultStarTreeDocument.metrics[4]);
+            assertEquals(expectedStarTreeDocument.metrics[5], resultStarTreeDocument.metrics[5]);
         }
     }
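
A recurring pattern in these hunks is that double metrics are converted with NumericUtils.doubleToSortableLong before being handed to the builder, because star-tree metrics travel through doc values as longs. The encoding is order-preserving and lossless, which is why the expected values can still be written as plain doubles. A quick round-trip illustration, using only the Lucene API the tests already import:

    import org.apache.lucene.util.NumericUtils;

    class SortableLongRoundTrip {
        public static void main(String[] args) {
            // doubleToSortableLong is a lossless, order-preserving bijection,
            // so decoding always returns the exact original double.
            long encoded = NumericUtils.doubleToSortableLong(56.0);
            double decoded = NumericUtils.sortableLongToDouble(encoded);
            assert decoded == 56.0;
        }
    }
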
@@ -661,28 +718,28 @@ public void test_sortAndAggregateStarTreeDocument_longMaxAndLongMinDimensions()
         starTreeDocuments[0] = new StarTreeDocument(
             new Long[] { Long.MIN_VALUE, 4L, 3L, 4L },
-            new Double[] { 12.0, 10.0, randomDouble(), 8.0, 20.0 }
+            new Object[] { 12.0, 10.0, randomDouble(), 8.0, 20.0 }
         );
         starTreeDocuments[1] = new StarTreeDocument(
             new Long[] { 3L, 4L, 2L, Long.MAX_VALUE },
-            new Double[] { 10.0, 6.0, randomDouble(), 12.0, 10.0 }
+            new Object[] { 10.0, 6.0, randomDouble(), 12.0, 10.0 }
         );
         starTreeDocuments[2] = new StarTreeDocument(
             new Long[] { 3L, 4L, 2L, Long.MAX_VALUE },
-            new Double[] { 14.0, 12.0, randomDouble(), 6.0, 24.0 }
+            new Object[] { 14.0, 12.0, randomDouble(), 6.0, 24.0 }
         );
         starTreeDocuments[3] = new StarTreeDocument(
             new Long[] { Long.MIN_VALUE, 4L, 3L, 4L },
-            new Double[] { 9.0, 4.0, randomDouble(), 9.0, 12.0 }
+            new Object[] { 9.0, 4.0, randomDouble(), 9.0, 12.0 }
         );
         starTreeDocuments[4] = new StarTreeDocument(
             new Long[] { 3L, 4L, 2L, Long.MAX_VALUE },
-            new Double[] { 11.0, 16.0, randomDouble(), 8.0, 13.0 }
+            new Object[] { 11.0, 16.0, randomDouble(), 8.0, 13.0 }
         );
         List<StarTreeDocument> inorderStarTreeDocuments = List.of(
-            new StarTreeDocument(new Long[] { Long.MIN_VALUE, 4L, 3L, 4L }, new Object[] { 21.0, 14.0, 2L, 8.0, 20.0 }),
-            new StarTreeDocument(new Long[] { 3L, 4L, 2L, Long.MAX_VALUE }, new Object[] { 35.0, 34.0, 3L, 6.0, 24.0 })
+            new StarTreeDocument(new Long[] { Long.MIN_VALUE, 4L, 3L, 4L }, new Object[] { 21.0, 14.0, 2L, 8.0, 20.0, 2L }),
+            new StarTreeDocument(new Long[] { 3L, 4L, 2L, Long.MAX_VALUE }, new Object[] { 35.0, 34.0, 3L, 6.0, 24.0, 3L })
         );
         Iterator<StarTreeDocument> expectedStarTreeDocumentIterator = inorderStarTreeDocuments.iterator();
@@ -695,7 +752,7 @@ public void test_sortAndAggregateStarTreeDocument_longMaxAndLongMinDimensions()
             long metric5 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[4]);
             segmentStarTreeDocuments[i] = new StarTreeDocument(
                 starTreeDocuments[i].dimensions,
-                new Long[] { metric1, metric2, metric3, metric4, metric5 }
+                new Long[] { metric1, metric2, metric3, metric4, metric5, null }
             );
         }
@@ -720,6 +777,7 @@ public void test_sortAndAggregateStarTreeDocument_longMaxAndLongMinDimensions()
             assertEquals(expectedStarTreeDocument.metrics[2], resultStarTreeDocument.metrics[2]);
             assertEquals(expectedStarTreeDocument.metrics[3], resultStarTreeDocument.metrics[3]);
             assertEquals(expectedStarTreeDocument.metrics[4], resultStarTreeDocument.metrics[4]);
+            assertEquals(expectedStarTreeDocument.metrics[5], resultStarTreeDocument.metrics[5]);
             numOfAggregatedDocuments++;
         }
@@ -735,19 +793,28 @@ public void test_sortAndAggregateStarTreeDocument_DoubleMaxAndDoubleMinMetrics()
         starTreeDocuments[0] = new StarTreeDocument(
             new Long[] { 2L, 4L, 3L, 4L },
-            new Double[] { Double.MAX_VALUE, 10.0, randomDouble(), 8.0, 20.0 }
+            new Object[] { Double.MAX_VALUE, 10.0, randomDouble(), 8.0, 20.0, 100L }
+        );
+        starTreeDocuments[1] = new StarTreeDocument(
+            new Long[] { 3L, 4L, 2L, 1L },
+            new Object[] { 10.0, 6.0, randomDouble(), 12.0, 10.0, 100L }
         );
-        starTreeDocuments[1] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 10.0, 6.0, randomDouble(), 12.0, 10.0 });
         starTreeDocuments[2] = new StarTreeDocument(
             new Long[] { 3L, 4L, 2L, 1L },
-            new Double[] { 14.0, Double.MIN_VALUE, randomDouble(), 6.0, 24.0 }
+            new Object[] { 14.0, Double.MIN_VALUE, randomDouble(), 6.0, 24.0, 100L }
+        );
+        starTreeDocuments[3] = new StarTreeDocument(
+            new Long[] { 2L, 4L, 3L, 4L },
+            new Object[] { 9.0, 4.0, randomDouble(), 9.0, 12.0, 100L }
+        );
+        starTreeDocuments[4] = new StarTreeDocument(
+            new Long[] { 3L, 4L, 2L, 1L },
+            new Object[] { 11.0, 16.0, randomDouble(), 8.0, 13.0, 100L }
         );
-        starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Double[] { 9.0, 4.0, randomDouble(), 9.0, 12.0 });
-        starTreeDocuments[4] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 11.0, 16.0, randomDouble(), 8.0, 13.0 });
         List<StarTreeDocument> inorderStarTreeDocuments = List.of(
-            new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Object[] { Double.MAX_VALUE + 9, 14.0, 2L, 8.0, 20.0 }),
-            new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Object[] { 35.0, Double.MIN_VALUE + 22, 3L, 6.0, 24.0 })
+            new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Object[] { Double.MAX_VALUE + 9, 14.0, 2L, 8.0, 20.0, 200L }),
+            new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Object[] { 35.0, Double.MIN_VALUE + 22, 3L, 6.0, 24.0, 300L })
         );
         Iterator<StarTreeDocument> expectedStarTreeDocumentIterator = inorderStarTreeDocuments.iterator();
@@ -758,9 +825,10 @@ public void test_sortAndAggregateStarTreeDocument_DoubleMaxAndDoubleMinMetrics()
             long metric3 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[2]);
             long metric4 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[3]);
             long metric5 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[4]);
+            Long metric6 = (Long) starTreeDocuments[i].metrics[5];
             segmentStarTreeDocuments[i] = new StarTreeDocument(
                 starTreeDocuments[i].dimensions,
-                new Long[] { metric1, metric2, metric3, metric4, metric5 }
+                new Long[] { metric1, metric2, metric3, metric4, metric5, metric6 }
             );
         }
@@ -892,7 +960,7 @@ public void test_build_halfFloatMetrics() throws IOException {
             );
             segmentStarTreeDocuments[i] = new StarTreeDocument(
                 starTreeDocuments[i].dimensions,
-                new Long[] { metric1, metric2, metric3, metric4, metric5 }
+                new Long[] { metric1, metric2, metric3, metric4, metric5, null }
             );
         }
@@ -943,20 +1011,23 @@ public void test_build_floatMetrics() throws IOException {
         starTreeDocuments[0] = new StarTreeDocument(
             new Long[] { 2L, 4L, 3L, 4L },
-            new Float[] { 12.0F, 10.0F, randomFloat(), 8.0F, 20.0F }
+            new Object[] { 12.0F, 10.0F, randomFloat(), 8.0F, 20.0F, null }
         );
         starTreeDocuments[1] = new StarTreeDocument(
             new Long[] { 3L, 4L, 2L, 1L },
-            new Float[] { 10.0F, 6.0F, randomFloat(), 12.0F, 10.0F }
+            new Object[] { 10.0F, 6.0F, randomFloat(), 12.0F, 10.0F, null }
         );
         starTreeDocuments[2] = new StarTreeDocument(
             new Long[] { 3L, 4L, 2L, 1L },
-            new Float[] { 14.0F, 12.0F, randomFloat(), 6.0F, 24.0F }
+            new Object[] { 14.0F, 12.0F, randomFloat(), 6.0F, 24.0F, null }
+        );
+        starTreeDocuments[3] = new StarTreeDocument(
+            new Long[] { 2L, 4L, 3L, 4L },
+            new Object[] { 9.0F, 4.0F, randomFloat(), 9.0F, 12.0F, null }
         );
-        starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Float[] { 9.0F, 4.0F, randomFloat(), 9.0F, 12.0F });
         starTreeDocuments[4] = new StarTreeDocument(
             new Long[] { 3L, 4L, 2L, 1L },
-            new Float[] { 11.0F, 16.0F, randomFloat(), 8.0F, 13.0F }
+            new Object[] { 11.0F, 16.0F, randomFloat(), 8.0F, 13.0F, null }
         );
         StarTreeDocument[] segmentStarTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments];
@@ -966,9 +1037,10 @@ public void test_build_floatMetrics() throws IOException {
             long metric3 = NumericUtils.floatToSortableInt((Float) starTreeDocuments[i].metrics[2]);
             long metric4 = NumericUtils.floatToSortableInt((Float) starTreeDocuments[i].metrics[3]);
             long metric5 = NumericUtils.floatToSortableInt((Float) starTreeDocuments[i].metrics[4]);
+            Long metric6 = (Long) starTreeDocuments[i].metrics[5];
             segmentStarTreeDocuments[i] = new StarTreeDocument(
                 starTreeDocuments[i].dimensions,
-                new Long[] { metric1, metric2, metric3, metric4, metric5 }
+                new Long[] { metric1, metric2, metric3, metric4, metric5, metric6 }
             );
         }
@@ -1031,7 +1103,7 @@ public void test_build_longMetrics() throws IOException {
             long metric5 = (Long) starTreeDocuments[i].metrics[4];
             segmentStarTreeDocuments[i] = new StarTreeDocument(
                 starTreeDocuments[i].dimensions,
-                new Long[] { metric1, metric2, metric3, metric4, metric5 }
+                new Long[] { metric1, metric2, metric3, metric4, metric5, null }
             );
         }
@@ -1053,13 +1125,13 @@ public void test_build_longMetrics() throws IOException {
     private static Iterator<StarTreeDocument> getExpectedStarTreeDocumentIterator() {
         List<StarTreeDocument> expectedStarTreeDocuments = List.of(
-            new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Object[] { 21.0, 14.0, 2L, 8.0, 20.0 }),
-            new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Object[] { 35.0, 34.0, 3L, 6.0, 24.0 }),
-            new StarTreeDocument(new Long[] { null, 4L, 2L, 1L }, new Object[] { 35.0, 34.0, 3L, 6.0, 24.0 }),
-            new StarTreeDocument(new Long[] { null, 4L, 3L, 4L }, new Object[] { 21.0, 14.0, 2L, 8.0, 20.0 }),
-            new StarTreeDocument(new Long[] { null, 4L, null, 1L }, new Object[] { 35.0, 34.0, 3L, 6.0, 24.0 }),
-            new StarTreeDocument(new Long[] { null, 4L, null, 4L }, new Object[] { 21.0, 14.0, 2L, 8.0, 20.0 }),
-            new StarTreeDocument(new Long[] { null, 4L, null, null }, new Object[] { 56.0, 48.0, 5L, 6.0, 24.0 })
+            new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Object[] { 21.0, 14.0, 2L, 8.0, 20.0, 2L }),
+            new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Object[] { 35.0, 34.0, 3L, 6.0, 24.0, 3L }),
+            new StarTreeDocument(new Long[] { null, 4L, 2L, 1L }, new Object[] { 35.0, 34.0, 3L, 6.0, 24.0, 3L }),
+            new StarTreeDocument(new Long[] { null, 4L, 3L, 4L }, new Object[] { 21.0, 14.0, 2L, 8.0, 20.0, 2L }),
+            new StarTreeDocument(new Long[] { null, 4L, null, 1L }, new Object[] { 35.0, 34.0, 3L, 6.0, 24.0, 3L }),
+            new StarTreeDocument(new Long[] { null, 4L, null, 4L }, new Object[] { 21.0, 14.0, 2L, 8.0, 20.0, 2L }),
+            new StarTreeDocument(new Long[] { null, 4L, null, null }, new Object[] { 56.0, 48.0, 5L, 6.0, 24.0, 5L })
         );
         return expectedStarTreeDocuments.iterator();
     }
@@ -1069,11 +1141,26 @@ public void test_build() throws IOException {
         int noOfStarTreeDocuments = 5;
         StarTreeDocument[] starTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments];
-        starTreeDocuments[0] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Double[] { 12.0, 10.0, randomDouble(), 8.0, 20.0 });
-        starTreeDocuments[1] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 10.0, 6.0, randomDouble(), 12.0, 10.0 });
-        starTreeDocuments[2] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 14.0, 12.0, randomDouble(), 6.0, 24.0 });
-        starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Double[] { 9.0, 4.0, randomDouble(), 9.0, 12.0 });
-        starTreeDocuments[4] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 11.0, 16.0, randomDouble(), 8.0, 13.0 });
+        starTreeDocuments[0] = new StarTreeDocument(
+            new Long[] { 2L, 4L, 3L, 4L },
+            new Object[] { 12.0, 10.0, randomDouble(), 8.0, 20.0, 1L }
+        );
+        starTreeDocuments[1] = new StarTreeDocument(
+            new Long[] { 3L, 4L, 2L, 1L },
+            new Object[] { 10.0, 6.0, randomDouble(), 12.0, 10.0, null }
+        );
+        starTreeDocuments[2] = new StarTreeDocument(
+            new Long[] { 3L, 4L, 2L, 1L },
+            new Object[] { 14.0, 12.0, randomDouble(), 6.0, 24.0, null }
+        );
+        starTreeDocuments[3] = new StarTreeDocument(
+            new Long[] { 2L, 4L, 3L, 4L },
+            new Object[] { 9.0, 4.0, randomDouble(), 9.0, 12.0, null }
+        );
+        starTreeDocuments[4] = new StarTreeDocument(
+            new Long[] { 3L, 4L, 2L, 1L },
+            new Object[] { 11.0, 16.0, randomDouble(), 8.0, 13.0, null }
+        );
         StarTreeDocument[] segmentStarTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments];
         for (int i = 0; i < noOfStarTreeDocuments; i++) {
@@ -1082,9 +1169,10 @@ public void test_build() throws IOException {
             long metric3 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[2]);
             long metric4 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[3]);
             long metric5 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[4]);
+            Long metric6 = (Long) starTreeDocuments[i].metrics[5];
             segmentStarTreeDocuments[i] = new StarTreeDocument(
                 starTreeDocuments[i].dimensions,
-                new Long[] { metric1, metric2, metric3, metric4, metric5 }
+                new Long[] { metric1, metric2, metric3, metric4, metric5, metric6 }
             );
         }
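
The test_build_starTreeDataset hunk just below shows the registration side of the feature: _doc_count participates as an ordinary metric on the star-tree field, with its own dedicated MetricStat. Condensed to its essence, mirroring the test's own field names:

    // Mirrors the test setup just below: _doc_count is declared like any
    // other metric, but with the dedicated DOC_COUNT stat.
    List<Metric> metrics = List.of(
        new Metric("fieldI", List.of(MetricStat.SUM)),
        new Metric("_doc_count", List.of(MetricStat.DOC_COUNT))
    );

Because it is a regular Metric, the doc count flows through the same iterator and aggregation plumbing as every other metric column.
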
@@ -1130,7 +1218,7 @@ public void test_build_starTreeDataset() throws IOException {
         fields = List.of("fieldC", "fieldB", "fieldL", "fieldI");
         dimensionsOrder = List.of(new NumericDimension("fieldC"), new NumericDimension("fieldB"), new NumericDimension("fieldL"));
-        metrics = List.of(new Metric("fieldI", List.of(MetricStat.SUM)));
+        metrics = List.of(new Metric("fieldI", List.of(MetricStat.SUM)), new Metric("_doc_count", List.of(MetricStat.DOC_COUNT)));
         DocValuesProducer docValuesProducer = mock(DocValuesProducer.class);
@@ -1199,18 +1287,18 @@ public void test_build_starTreeDataset() throws IOException {
         int noOfStarTreeDocuments = 7;
         StarTreeDocument[] starTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments];
-        starTreeDocuments[0] = new StarTreeDocument(new Long[] { 1L, 11L, 21L }, new Double[] { 400.0 });
-        starTreeDocuments[1] = new StarTreeDocument(new Long[] { 1L, 12L, 22L }, new Double[] { 200.0 });
-        starTreeDocuments[2] = new StarTreeDocument(new Long[] { 2L, 13L, 23L }, new Double[] { 300.0 });
-        starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, 13L, 21L }, new Double[] { 100.0 });
-        starTreeDocuments[4] = new StarTreeDocument(new Long[] { 3L, 11L, 21L }, new Double[] { 600.0 });
-        starTreeDocuments[5] = new StarTreeDocument(new Long[] { 3L, 12L, 23L }, new Double[] { 200.0 });
-        starTreeDocuments[6] = new StarTreeDocument(new Long[] { 3L, 12L, 21L }, new Double[] { 400.0 });
+        starTreeDocuments[0] = new StarTreeDocument(new Long[] { 1L, 11L, 21L }, new Object[] { 400.0, null });
+        starTreeDocuments[1] = new StarTreeDocument(new Long[] { 1L, 12L, 22L }, new Object[] { 200.0, null });
+        starTreeDocuments[2] = new StarTreeDocument(new Long[] { 2L, 13L, 23L }, new Object[] { 300.0, null });
+        starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, 13L, 21L }, new Object[] { 100.0, null });
+        starTreeDocuments[4] = new StarTreeDocument(new Long[] { 3L, 11L, 21L }, new Object[] { 600.0, null });
+        starTreeDocuments[5] = new StarTreeDocument(new Long[] { 3L, 12L, 23L }, new Object[] { 200.0, null });
+        starTreeDocuments[6] = new StarTreeDocument(new Long[] { 3L, 12L, 21L }, new Object[] { 400.0, null });
         StarTreeDocument[] segmentStarTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments];
         for (int i = 0; i < noOfStarTreeDocuments; i++) {
             long metric1 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[0]);
-            segmentStarTreeDocuments[i] = new StarTreeDocument(starTreeDocuments[i].dimensions, new Long[] { metric1 });
+            segmentStarTreeDocuments[i] = new StarTreeDocument(starTreeDocuments[i].dimensions, new Long[] { metric1, null });
         }
         SequentialDocValuesIterator[] dimsIterators = getDimensionIterators(segmentStarTreeDocuments);
@@ -1250,6 +1338,7 @@ public void test_build_starTreeDataset() throws IOException {
             assertEquals(expectedStarTreeDocument.dimensions[1], resultStarTreeDocument.dimensions[1]);
             assertEquals(expectedStarTreeDocument.dimensions[2], resultStarTreeDocument.dimensions[2]);
             assertEquals(expectedStarTreeDocument.metrics[0], resultStarTreeDocument.metrics[0]);
+            assertEquals(expectedStarTreeDocument.metrics[1], resultStarTreeDocument.metrics[1]);
         }
         validateStarTree(builder.getRootNode(), 3, 1, builder.getStarTreeDocuments());
     }
@@ -1278,33 +1367,33 @@ private static Map<Integer, Map<Long, Double>> getExpectedDimToValueMap() {
     private Iterator<StarTreeDocument> expectedStarTreeDocuments() {
         List<StarTreeDocument> expectedStarTreeDocuments = List.of(
-            new StarTreeDocument(new Long[] { 1L, 11L, 21L }, new Object[] { 400.0 }),
-            new StarTreeDocument(new Long[] { 1L, 12L, 22L }, new Object[] { 200.0 }),
-            new StarTreeDocument(new Long[] { 2L, 13L, 21L }, new Object[] { 100.0 }),
-            new StarTreeDocument(new Long[] { 2L, 13L, 23L }, new Object[] { 300.0 }),
-            new StarTreeDocument(new Long[] { 3L, 11L, 21L }, new Object[] { 600.0 }),
-            new StarTreeDocument(new Long[] { 3L, 12L, 21L }, new Object[] { 400.0 }),
-            new StarTreeDocument(new Long[] { 3L, 12L, 23L }, new Object[] { 200.0 }),
-            new StarTreeDocument(new Long[] { null, 11L, 21L }, new Object[] { 1000.0 }),
-            new StarTreeDocument(new Long[] { null, 12L, 21L }, new Object[] { 400.0 }),
-            new StarTreeDocument(new Long[] { null, 12L, 22L }, new Object[] { 200.0 }),
-            new StarTreeDocument(new Long[] { null, 12L, 23L }, new Object[] { 200.0 }),
-            new StarTreeDocument(new Long[] { null, 13L, 21L }, new Object[] { 100.0 }),
-            new StarTreeDocument(new Long[] { null, 13L, 23L }, new Object[] { 300.0 }),
-            new StarTreeDocument(new Long[] { null, null, 21L }, new Object[] { 1500.0 }),
-            new StarTreeDocument(new Long[] { null, null, 22L }, new Object[] { 200.0 }),
-            new StarTreeDocument(new Long[] { null, null, 23L }, new Object[] { 500.0 }),
-            new StarTreeDocument(new Long[] { null, null, null }, new Object[] { 2200.0 }),
-            new StarTreeDocument(new Long[] { null, 12L, null }, new Object[] { 800.0 }),
-            new StarTreeDocument(new Long[] { null, 13L, null }, new Object[] { 400.0 }),
-            new StarTreeDocument(new Long[] { 1L, null, 21L }, new Object[] { 400.0 }),
-            new StarTreeDocument(new Long[] { 1L, null, 22L }, new Object[] { 200.0 }),
-            new StarTreeDocument(new Long[] { 1L, null, null }, new Object[] { 600.0 }),
-            new StarTreeDocument(new Long[] { 2L, 13L, null }, new Object[] { 400.0 }),
-            new StarTreeDocument(new Long[] { 3L, null, 21L }, new Object[] { 1000.0 }),
-            new StarTreeDocument(new Long[] { 3L, null, 23L }, new Object[] { 200.0 }),
-            new StarTreeDocument(new Long[] { 3L, null, null }, new Object[] { 1200.0 }),
-            new StarTreeDocument(new Long[] { 3L, 12L, null }, new Object[] { 600.0 })
+            new StarTreeDocument(new Long[] { 1L, 11L, 21L }, new Object[] { 400.0, 1L }),
+            new StarTreeDocument(new Long[] { 1L, 12L, 22L }, new Object[] { 200.0, 1L }),
+            new StarTreeDocument(new Long[] { 2L, 13L, 21L }, new Object[] { 100.0, 1L }),
+            new StarTreeDocument(new Long[] { 2L, 13L, 23L }, new Object[] { 300.0, 1L }),
+            new StarTreeDocument(new Long[] { 3L, 11L, 21L }, new Object[] { 600.0, 1L }),
+            new StarTreeDocument(new Long[] { 3L, 12L, 21L }, new Object[] { 400.0, 1L }),
+            new StarTreeDocument(new Long[] { 3L, 12L, 23L }, new Object[] { 200.0, 1L }),
+            new StarTreeDocument(new Long[] { null, 11L, 21L }, new Object[] { 1000.0, 2L }),
+            new StarTreeDocument(new Long[] { null, 12L, 21L }, new Object[] { 400.0, 1L }),
+            new StarTreeDocument(new Long[] { null, 12L, 22L }, new Object[] { 200.0, 1L }),
+            new StarTreeDocument(new Long[] { null, 12L, 23L }, new Object[] { 200.0, 1L }),
+            new StarTreeDocument(new Long[] { null, 13L, 21L }, new Object[] { 100.0, 1L }),
+            new StarTreeDocument(new Long[] { null, 13L, 23L }, new Object[] { 300.0, 1L }),
+            new StarTreeDocument(new Long[] { null, null, 21L }, new Object[] { 1500.0, 4L }),
+            new StarTreeDocument(new Long[] { null, null, 22L }, new Object[] { 200.0, 1L }),
+            new StarTreeDocument(new Long[] { null, null, 23L }, new Object[] { 500.0, 2L }),
+            new StarTreeDocument(new Long[] { null, null, null }, new Object[] { 2200.0, 7L }),
+            new StarTreeDocument(new Long[] { null, 12L, null }, new Object[] { 800.0, 3L }),
+            new StarTreeDocument(new Long[] { null, 13L, null }, new Object[] { 400.0, 2L }),
+            new StarTreeDocument(new Long[] { 1L, null, 21L }, new Object[] { 400.0, 1L }),
+            new StarTreeDocument(new Long[] { 1L, null, 22L }, new Object[] { 200.0, 1L }),
+            new StarTreeDocument(new Long[] { 1L, null, null }, new Object[] { 600.0, 2L }),
+            new StarTreeDocument(new Long[] { 2L, 13L, null }, new Object[] { 400.0, 2L }),
+            new StarTreeDocument(new Long[] { 3L, null, 21L }, new Object[] { 1000.0, 2L }),
+            new StarTreeDocument(new Long[] { 3L, null, 23L }, new Object[] { 200.0, 1L }),
+            new StarTreeDocument(new Long[] { 3L, null, null }, new Object[] { 1200.0, 3L }),
+            new StarTreeDocument(new Long[] { 3L, 12L, null }, new Object[] { 600.0, 2L })
         );
         return expectedStarTreeDocuments.iterator();
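
The doc-count column in this 27-document expectation can be spot-checked by hand against the seven leaf documents at the top of the list; for instance the star document for third-dimension value 21L:

    // Leaf documents whose third dimension is 21L:
    //   [1, 11, 21] -> 400.0, [2, 13, 21] -> 100.0,
    //   [3, 11, 21] -> 600.0, [3, 12, 21] -> 400.0
    double sum = 400.0 + 100.0 + 600.0 + 400.0;  // 1500.0
    long docCount = 1L + 1L + 1L + 1L;           // 4L
    // matches the expected entry { null, null, 21L } -> { 1500.0, 4L }

The all-null root document likewise sums to 2200.0 over all 7 leaf documents, matching { null, null, null } -> { 2200.0, 7L }.
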
@@ -2209,8 +2298,14 @@ public void testMergeFlowWithDuplicateDimensionValues() throws IOException {
             metricsList.add(getLongFromDouble(i * 10.0));
             metricsWithField.add(i);
         }
+        List<Long> docCountMetricsList = new ArrayList<>(100);
+        List<Integer> docCountMetricsWithField = new ArrayList<>(100);
+        for (int i = 0; i < 500; i++) {
+            docCountMetricsList.add(i * 10L);
+            docCountMetricsWithField.add(i);
+        }
-        StarTreeField sf = getStarTreeField(1);
+        StarTreeField sf = getStarTreeFieldWithDocCount(1, true);
         StarTreeValues starTreeValues = getStarTreeValues(
             dimList1,
             docsWithField1,
@@ -2222,6 +2317,8 @@ public void testMergeFlowWithDuplicateDimensionValues() throws IOException {
             docsWithField4,
             metricsList,
             metricsWithField,
+            docCountMetricsList,
+            docCountMetricsWithField,
             sf
         );
@@ -2236,6 +2333,8 @@ public void testMergeFlowWithDuplicateDimensionValues() throws IOException {
             docsWithField4,
             metricsList,
             metricsWithField,
+            docCountMetricsList,
+            docCountMetricsWithField,
             sf
         );
         builder = getStarTreeBuilder(sf, writeState, mapperService);
@@ -2246,23 +2345,26 @@ public void testMergeFlowWithDuplicateDimensionValues() throws IOException {
         double sum = 0;
         /**
         401 docs get generated
-        [0, 0, 0, 0] | [200.0]
-        [1, 1, 1, 1] | [700.0]
-        [2, 2, 2, 2] | [1200.0]
-        [3, 3, 3, 3] | [1700.0]
-        [4, 4, 4, 4] | [2200.0]
+        [0, 0, 0, 0] | [200.0, 10]
+        [1, 1, 1, 1] | [700.0, 10]
+        [2, 2, 2, 2] | [1200.0, 10]
+        [3, 3, 3, 3] | [1700.0, 10]
+        [4, 4, 4, 4] | [2200.0, 10]
         .....
-        [null, null, null, 99] | [49700.0]
-        [null, null, null, null] | [2495000.0]
+        [null, null, null, 99] | [49700.0, 10]
+        [null, null, null, null] | [2495000.0, 1000]
         */
         for (StarTreeDocument starTreeDocument : starTreeDocuments) {
             if (starTreeDocument.dimensions[3] == null) {
                 assertEquals(sum, starTreeDocument.metrics[0]);
+                assertEquals(2495000L, (long) starTreeDocument.metrics[1]);
             } else {
                 if (starTreeDocument.dimensions[0] != null) {
                     sum += (double) starTreeDocument.metrics[0];
                 }
                 assertEquals(starTreeDocument.dimensions[3] * 500 + 200.0, starTreeDocument.metrics[0]);
+                assertEquals(starTreeDocument.dimensions[3] * 500 + 200L, (long) starTreeDocument.metrics[1]);
+
             }
             count++;
         }
@@ -2319,7 +2421,14 @@ public void testMergeFlowWithMaxLeafDocs() throws IOException {
             metricsWithField.add(i);
         }
-        StarTreeField sf = getStarTreeField(3);
+        List<Long> metricsList1 = new ArrayList<>(100);
+        List<Integer> metricsWithField1 = new ArrayList<>(100);
+        for (int i = 0; i < 500; i++) {
+            metricsList1.add(1L);
+            metricsWithField1.add(i);
+        }
+
+        StarTreeField sf = getStarTreeFieldWithDocCount(3, true);
         StarTreeValues starTreeValues = getStarTreeValues(
             dimList1,
             docsWithField1,
@@ -2331,6 +2440,8 @@ public void testMergeFlowWithMaxLeafDocs() throws IOException {
             docsWithField4,
             metricsList,
             metricsWithField,
+            metricsList1,
+            metricsWithField1,
             sf
         );
@@ -2345,6 +2456,8 @@ public void testMergeFlowWithMaxLeafDocs() throws IOException {
             docsWithField4,
             metricsList,
             metricsWithField,
+            metricsList1,
+            metricsWithField1,
             sf
         );
@@ -2353,17 +2466,58 @@ public void testMergeFlowWithMaxLeafDocs() throws IOException {
         List<StarTreeDocument> starTreeDocuments = builder.getStarTreeDocuments();
         /**
         635 docs get generated
-        [0, 0, 0, 0] | [200.0]
-        [1, 1, 1, 1] | [700.0]
-        [2, 2, 2, 2] | [1200.0]
-        [3, 3, 3, 3] | [1700.0]
-        [4, 4, 4, 4] | [2200.0]
+        [0, 0, 0, 0] | [200.0, 10]
+        [0, 0, 1, 1] | [700.0, 10]
+        [0, 0, 2, 2] | [1200.0, 10]
+        [0, 0, 3, 3] | [1700.0, 10]
+        [1, 0, 4, 4] | [2200.0, 10]
+        [1, 0, 5, 5] | [2700.0, 10]
+        [1, 0, 6, 6] | [3200.0, 10]
+        [1, 0, 7, 7] | [3700.0, 10]
+        [2, 0, 8, 8] | [4200.0, 10]
+        [2, 0, 9, 9] | [4700.0, 10]
+        [2, 1, 10, 10] | [5200.0, 10]
+        [2, 1, 11, 11] | [5700.0, 10]
         .....
-        [null, null, null, 99] | [49700.0]
+        [18, 7, null, null] | [147800.0, 40]
+        ...
+        [7, 2, null, null] | [28900.0, 20]
+        ...
+        [null, null, null, 99] | [49700.0, 10]
         .....
-        [null, null, null, null] | [2495000.0]
+        [null, null, null, null] | [2495000.0, 1000]
         */
         assertEquals(635, starTreeDocuments.size());
+        for (StarTreeDocument starTreeDocument : starTreeDocuments) {
+            if (starTreeDocument.dimensions[0] != null
+                && starTreeDocument.dimensions[1] != null
+                && starTreeDocument.dimensions[2] != null
+                && starTreeDocument.dimensions[3] != null) {
+                assertEquals(10L, starTreeDocument.metrics[1]);
+            } else if (starTreeDocument.dimensions[1] != null
+                && starTreeDocument.dimensions[2] != null
+                && starTreeDocument.dimensions[3] != null) {
+                assertEquals(10L, starTreeDocument.metrics[1]);
+            } else if (starTreeDocument.dimensions[0] != null
+                && starTreeDocument.dimensions[2] != null
+                && starTreeDocument.dimensions[3] != null) {
+                assertEquals(10L, starTreeDocument.metrics[1]);
+            } else if (starTreeDocument.dimensions[0] != null
+                && starTreeDocument.dimensions[1] != null
+                && starTreeDocument.dimensions[3] != null) {
+                assertEquals(10L, starTreeDocument.metrics[1]);
+            } else if (starTreeDocument.dimensions[0] != null && starTreeDocument.dimensions[3] != null) {
+                assertEquals(10L, starTreeDocument.metrics[1]);
+            } else if (starTreeDocument.dimensions[0] != null && starTreeDocument.dimensions[1] != null) {
+                assertTrue((long) starTreeDocument.metrics[1] == 20L || (long) starTreeDocument.metrics[1] == 40L);
+            } else if (starTreeDocument.dimensions[1] != null && starTreeDocument.dimensions[3] != null) {
+                assertEquals(10L, starTreeDocument.metrics[1]);
+            } else if (starTreeDocument.dimensions[1] != null) {
+                assertEquals(100L, starTreeDocument.metrics[1]);
+            } else if (starTreeDocument.dimensions[0] != null) {
+                assertEquals(40L, starTreeDocument.metrics[1]);
+            }
+        }
         validateStarTree(builder.getRootNode(), 4, sf.getStarTreeConfig().maxLeafDocs(), builder.getStarTreeDocuments());
     }
@@ -2378,6 +2532,8 @@ private StarTreeValues getStarTreeValues(
         List<Integer> docsWithField4,
         List<Long> metricsList,
         List<Integer> metricsWithField,
+        List<Long> metricsList1,
+        List<Integer> metricsWithField1,
         StarTreeField sf
     ) {
         SortedNumericDocValues d1sndv = getSortedNumericMock(dimList1, docsWithField1);
@@ -2385,8 +2541,11 @@ private StarTreeValues getStarTreeValues(
         SortedNumericDocValues d3sndv = getSortedNumericMock(dimList3, docsWithField3);
         SortedNumericDocValues d4sndv = getSortedNumericMock(dimList4, docsWithField4);
         SortedNumericDocValues m1sndv = getSortedNumericMock(metricsList, metricsWithField);
+        SortedNumericDocValues m2sndv = getSortedNumericMock(metricsList1, metricsWithField1);
         Map<String, DocIdSetIterator> dimDocIdSetIterators = Map.of("field1", d1sndv, "field3", d2sndv, "field5", d3sndv, "field8", d4sndv);
-        Map<String, DocIdSetIterator> metricDocIdSetIterators = Map.of("field2", m1sndv);
+        Map<String, DocIdSetIterator> metricDocIdSetIterators = new LinkedHashMap<>();
+        metricDocIdSetIterators.put("field2", m1sndv);
+        metricDocIdSetIterators.put("_doc_count", m2sndv);
         StarTreeValues starTreeValues = new StarTreeValues(sf, null, dimDocIdSetIterators, metricDocIdSetIterators, getAttributes(500));
         return starTreeValues;
     }
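
One detail in this helper is easy to miss: the metric iterators switched from Map.of to a LinkedHashMap. Map.of makes no iteration-order guarantee, while LinkedHashMap iterates in insertion order, so the "field2" and "_doc_count" iterators stay aligned with the order in which the metrics were declared on the StarTreeField. A plausible reading of the change, sketched:

    // LinkedHashMap preserves insertion order; Map.of does not guarantee any order.
    Map<String, DocIdSetIterator> metricDocIdSetIterators = new LinkedHashMap<>();
    metricDocIdSetIterators.put("field2", m1sndv);      // first declared metric (SUM)
    metricDocIdSetIterators.put("_doc_count", m2sndv);  // second declared metric (DOC_COUNT)
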
@@ -2438,7 +2597,14 @@ public void testMergeFlowWithDuplicateDimensionValueWithMaxLeafDocs() throws IOE
             metricsWithField.add(i);
         }
-        StarTreeField sf = getStarTreeField(3);
+        List<Long> docCountMetricsList = new ArrayList<>(100);
+        List<Integer> docCountMetricsWithField = new ArrayList<>(100);
+        for (int i = 0; i < 500; i++) {
+            metricsList.add(getLongFromDouble(i * 2));
+            metricsWithField.add(i);
+        }
+
+        StarTreeField sf = getStarTreeFieldWithDocCount(3, true);
         StarTreeValues starTreeValues = getStarTreeValues(
             dimList1,
             docsWithField1,
@@ -2450,6 +2616,8 @@ public void testMergeFlowWithDuplicateDimensionValueWithMaxLeafDocs() throws IOE
             docsWithField4,
             metricsList,
             metricsWithField,
+            docCountMetricsList,
+            docCountMetricsWithField,
             sf
         );
@@ -2464,6 +2632,8 @@ public void testMergeFlowWithDuplicateDimensionValueWithMaxLeafDocs() throws IOE
             docsWithField4,
             metricsList,
             metricsWithField,
+            docCountMetricsList,
+            docCountMetricsWithField,
             sf
         );
         builder = getStarTreeBuilder(sf, writeState, mapperService);
@@ -2536,8 +2706,13 @@ public void testMergeFlowWithMaxLeafDocsAndStarTreeNodesAssertion() throws IOExc
             metricsList.add(getLongFromDouble(10.0));
             metricsWithField.add(i);
         }
-
-        StarTreeField sf = getStarTreeField(10);
+        List<Long> metricsList1 = new ArrayList<>(100);
+        List<Integer> metricsWithField1 = new ArrayList<>(100);
+        for (int i = 0; i < 500; i++) {
+            metricsList.add(1L);
+            metricsWithField.add(i);
+        }
+        StarTreeField sf = getStarTreeFieldWithDocCount(10, true);
         StarTreeValues starTreeValues = getStarTreeValues(
             dimList1,
             docsWithField1,
@@ -2549,6 +2724,8 @@ public void testMergeFlowWithMaxLeafDocsAndStarTreeNodesAssertion() throws IOExc
             docsWithField4,
             metricsList,
             metricsWithField,
+            metricsList1,
+            metricsWithField1,
             sf
         );
@@ -2563,6 +2740,8 @@ public void testMergeFlowWithMaxLeafDocsAndStarTreeNodesAssertion() throws IOExc
             docsWithField4,
             metricsList,
             metricsWithField,
+            metricsList1,
+            metricsWithField1,
             sf
         );
         builder = getStarTreeBuilder(sf, writeState, mapperService);
@@ -2584,14 +2763,18 @@ public void testMergeFlowWithMaxLeafDocsAndStarTreeNodesAssertion() throws IOExc
         validateStarTree(builder.getRootNode(), 4, sf.getStarTreeConfig().maxLeafDocs(), builder.getStarTreeDocuments());
     }
-    private static StarTreeField getStarTreeField(int maxLeafDocs) {
+    private static StarTreeField getStarTreeFieldWithDocCount(int maxLeafDocs, boolean includeDocCountMetric) {
         Dimension d1 = new NumericDimension("field1");
         Dimension d2 = new NumericDimension("field3");
         Dimension d3 = new NumericDimension("field5");
         Dimension d4 = new NumericDimension("field8");
         List<Dimension> dims = List.of(d1, d2, d3, d4);
         Metric m1 = new Metric("field2", List.of(MetricStat.SUM));
-        List<Metric> metrics = List.of(m1);
+        Metric m2 = null;
+        if (includeDocCountMetric) {
+            m2 = new Metric("_doc_count", List.of(MetricStat.DOC_COUNT));
+        }
+        List<Metric> metrics = m2 == null ? List.of(m1) : List.of(m1, m2);
         StarTreeFieldConfiguration c = new StarTreeFieldConfiguration(
             maxLeafDocs,
             new HashSet<>(),
@@ -2684,8 +2867,9 @@ public void testMergeFlow() throws IOException {
         Dimension d4 = new NumericDimension("field8");
         // Dimension d5 = new NumericDimension("field5");
         Metric m1 = new Metric("field2", List.of(MetricStat.SUM));
+        Metric m2 = new Metric("_doc_count", List.of(MetricStat.DOC_COUNT));
         List<Dimension> dims = List.of(d1, d2, d3, d4);
-        List<Metric> metrics = List.of(m1);
+        List<Metric> metrics = List.of(m1, m2);
         StarTreeFieldConfiguration c = new StarTreeFieldConfiguration(
             1,
             new HashSet<>(),
@@ -2697,8 +2881,9 @@ public void testMergeFlow() throws IOException {
         SortedNumericDocValues d3sndv = getSortedNumericMock(dimList3, docsWithField3);
         SortedNumericDocValues d4sndv = getSortedNumericMock(dimList4, docsWithField4);
         SortedNumericDocValues m1sndv = getSortedNumericMock(metricsList, metricsWithField);
+        SortedNumericDocValues m2sndv = DocValues.emptySortedNumeric();
         Map<String, DocIdSetIterator> dimDocIdSetIterators = Map.of("field1", d1sndv, "field3", d2sndv, "field5", d3sndv, "field8", d4sndv);
-        Map<String, DocIdSetIterator> metricDocIdSetIterators = Map.of("field2", m1sndv);
+        Map<String, DocIdSetIterator> metricDocIdSetIterators = Map.of("field2", m1sndv, "_doc_count", m2sndv);
         StarTreeValues starTreeValues = new StarTreeValues(sf, null, dimDocIdSetIterators, metricDocIdSetIterators, getAttributes(1000));
         SortedNumericDocValues f2d1sndv = getSortedNumericMock(dimList1, docsWithField1);
@@ -2706,6 +2891,7 @@ public void testMergeFlow() throws IOException {
         SortedNumericDocValues f2d3sndv = getSortedNumericMock(dimList3, docsWithField3);
         SortedNumericDocValues f2d4sndv = getSortedNumericMock(dimList4, docsWithField4);
         SortedNumericDocValues f2m1sndv = getSortedNumericMock(metricsList, metricsWithField);
+        SortedNumericDocValues f2m2sndv = DocValues.emptySortedNumeric();
         Map<String, DocIdSetIterator> f2dimDocIdSetIterators = Map.of(
             "field1",
             f2d1sndv,
             "field3",
             f2d2sndv,
             "field5",
             f2d3sndv,
             "field8",
             f2d4sndv
         );
-        Map<String, DocIdSetIterator> f2metricDocIdSetIterators = Map.of("field2", f2m1sndv);
+        Map<String, DocIdSetIterator> f2metricDocIdSetIterators = Map.of("field2", f2m1sndv, "_doc_count", f2m2sndv);
         StarTreeValues starTreeValues2 = new StarTreeValues(
             sf,
             null,
@@ -2728,17 +2914,18 @@ public void testMergeFlow() throws IOException {
         builder = getStarTreeBuilder(sf, writeState, mapperService);
         Iterator<StarTreeDocument> starTreeDocumentIterator = builder.mergeStarTrees(List.of(starTreeValues, starTreeValues2));
         /**
-        [0, 0, 0, 0] | [0.0]
-        [1, 1, 1, 1] | [20.0]
-        [2, 2, 2, 2] | [40.0]
-        [3, 3, 3, 3] | [60.0]
-        [4, 4, 4, 4] | [80.0]
-        [5, 5, 5, 5] | [100.0]
+        [0, 0, 0, 0] | [0.0, 2]
+        [1, 1, 1, 1] | [20.0, 2]
+        [2, 2, 2, 2] | [40.0, 2]
+        [3, 3, 3, 3] | [60.0, 2]
+        [4, 4, 4, 4] | [80.0, 2]
+        [5, 5, 5, 5] | [100.0, 2]
         ...
         [999, 999, 999, 999] | [19980.0]
         */
         for (StarTreeDocument starTreeDocument : builder.getStarTreeDocuments()) {
             assertEquals(starTreeDocument.dimensions[0] * 20.0, starTreeDocument.metrics[0]);
+            assertEquals(2L, starTreeDocument.metrics[1]);
         }
         builder.build(starTreeDocumentIterator);
@@ -2934,13 +3121,6 @@ private static StarTreeField getStarTreeField(MetricStat count) {
         return new StarTreeField("sf", dims, metrics, c);
     }
-    private Long getLongFromDouble(Double num) {
-        if (num == null) {
-            return null;
-        }
-        return NumericUtils.doubleToSortableLong(num);
-    }
-
     SortedNumericDocValues getSortedNumericMock(List<Long> dimList, List<Integer> docsWithField) {
         return new SortedNumericDocValues() {
             int index = -1;
diff --git a/server/src/test/java/org/opensearch/index/mapper/DerivedFieldMapperQueryTests.java b/server/src/test/java/org/opensearch/index/mapper/DerivedFieldMapperQueryTests.java
index b9bdfca3509e3..c744f2592e24f 100644
--- a/server/src/test/java/org/opensearch/index/mapper/DerivedFieldMapperQueryTests.java
+++ b/server/src/test/java/org/opensearch/index/mapper/DerivedFieldMapperQueryTests.java
@@ -15,6 +15,7 @@
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.queryparser.classic.ParseException;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TopDocs;
@@ -24,9 +25,12 @@
 import org.opensearch.common.lucene.Lucene;
 import org.opensearch.core.index.Index;
 import org.opensearch.geometry.Rectangle;
+import org.opensearch.index.query.MatchPhrasePrefixQueryBuilder;
+import org.opensearch.index.query.MultiMatchQueryBuilder;
 import org.opensearch.index.query.QueryBuilders;
 import org.opensearch.index.query.QueryShardContext;
 import org.opensearch.index.query.TermQueryBuilder;
+import org.opensearch.index.search.QueryStringQueryParser;
 import org.opensearch.script.DerivedFieldScript;
 import java.io.IOException;
@@ -435,7 +439,7 @@ public void execute() {
         }
     }
-    public void testObjectDerivedFields() throws IOException {
+    public void testObjectDerivedFields() throws IOException, ParseException {
         MapperService mapperService = createMapperService(topMapping(b -> {
             b.startObject("properties");
             {
@@ -545,6 +549,17 @@ public void execute() {
         topDocs = searcher.search(query, 10);
         assertEquals(0, topDocs.totalHits.value);
+        query = new MatchPhrasePrefixQueryBuilder("object_field.text_field", "document number").toQuery(queryShardContext);
+        topDocs = searcher.search(query, 10);
+        assertEquals(0, topDocs.totalHits.value);
+
+        // Multi Phrase Query
+        query = QueryBuilders.multiMatchQuery("GET", "object_field.nested_field.sub_field_1", "object_field.keyword_field")
+            .type(MultiMatchQueryBuilder.Type.PHRASE)
+            .toQuery(queryShardContext);
+        topDocs = searcher.search(query, 10);
+        assertEquals(7, topDocs.totalHits.value);
+
         // Range queries of types - date, long and double
         query = QueryBuilders.rangeQuery("object_field.date_field").from("2024-03-20T14:20:50").toQuery(queryShardContext);
         topDocs = searcher.search(query, 10);
@@ -567,6 +582,11 @@ public void execute() {
         topDocs = searcher.search(query, 10);
         assertEquals(7, topDocs.totalHits.value);
+        QueryStringQueryParser queryParser = new QueryStringQueryParser(queryShardContext, "object_field.keyword_field");
+        queryParser.parse("GE?");
+        topDocs = searcher.search(query, 10);
+        assertEquals(7, topDocs.totalHits.value);
+
         // Regexp Query
         query = QueryBuilders.regexpQuery("object_field.keyword_field", ".*let.*").toQuery(queryShardContext);
         topDocs = searcher.search(query, 10);
diff --git a/server/src/test/java/org/opensearch/index/mapper/DerivedFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/DerivedFieldTypeTests.java
index fe9db24f494ad..7da8c9eb1efa0 100644
--- a/server/src/test/java/org/opensearch/index/mapper/DerivedFieldTypeTests.java
+++ b/server/src/test/java/org/opensearch/index/mapper/DerivedFieldTypeTests.java
@@ -17,6 +17,7 @@
 import org.apache.lucene.document.LongPoint;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.memory.MemoryIndex;
+import org.apache.lucene.queries.spans.SpanMultiTermQueryWrapper;
 import org.apache.lucene.util.BytesRef;
 import org.opensearch.OpenSearchException;
 import org.opensearch.common.collect.Tuple;
@@ -59,6 +60,7 @@ public void testBooleanType() {
         assertTrue(dft.getFieldMapper() instanceof BooleanFieldMapper);
         assertTrue(dft.getIndexableFieldGenerator().apply(true) instanceof Field);
         assertTrue(dft.getIndexableFieldGenerator().apply(false) instanceof Field);
+        assertEquals("derived", dft.typeName());
     }
     public void testDateType() {
@@ -159,6 +161,22 @@ public void testGetAggregationScript_ip() throws IOException {
         assertEquals(new BytesRef(InetAddressPoint.encode(InetAddresses.forString((String) expected.get(0)))), result.get(0));
     }
+    public void testDerivedFieldValueFetcherDoesNotSupportCustomFormats() {
+        DerivedFieldType dft = createDerivedFieldType("boolean");
+        expectThrows(
+            IllegalArgumentException.class,
+            () -> dft.valueFetcher(mock(QueryShardContext.class), mock(SearchLookup.class), "yyyy-MM-dd")
+        );
+    }
+
+    public void testSpanPrefixQueryNotSupported() {
+        DerivedFieldType dft = createDerivedFieldType("boolean");
+        expectThrows(
+            IllegalArgumentException.class,
+            () -> dft.spanPrefixQuery("value", mock(SpanMultiTermQueryWrapper.SpanRewriteMethod.class), mock(QueryShardContext.class))
+        );
+    }
+
     private static LeafSearchLookup mockValueFetcherForAggs(QueryShardContext mockContext, DerivedFieldType dft, List<Object> expected) {
         SearchLookup searchLookup = mock(SearchLookup.class);
         LeafSearchLookup leafLookup = mock(LeafSearchLookup.class);
diff --git a/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldMapperTests.java
index 637072c8886c1..5b5ca378ee7ff 100644
--- a/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldMapperTests.java
+++ b/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldMapperTests.java
@@ -25,7 +25,6 @@
 import static org.hamcrest.Matchers.instanceOf;
 import static org.hamcrest.core.IsEqual.equalTo;
-import static org.hamcrest.core.StringContains.containsString;
 public class FlatObjectFieldMapperTests extends MapperTestCase {
     private static final String FIELD_TYPE = "flat_object";
@@ -133,9 +132,10 @@ public void testDefaults() throws Exception {
     public void testNullValue() throws IOException {
         DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping));
-        MapperParsingException e = expectThrows(MapperParsingException.class, () -> mapper.parse(source(b -> b.nullField("field"))));
-        assertThat(e.getMessage(), containsString("object mapping for [_doc] tried to parse field [field] as object"));
-
+        ParsedDocument parsedDocument = mapper.parse(source(b -> b.nullField("field")));
+        assertEquals(1, parsedDocument.docs().size());
+        IndexableField[] fields = parsedDocument.rootDoc().getFields("field");
+        assertEquals(0, fields.length);
     }
     @Override
diff --git a/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java
index 3fa97825cdfc6..449b251dddca1 100644
--- a/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java
+++ b/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java
@@ -52,10 +52,11 @@ public void teardown() {
     }
     public void testValidStarTree() throws IOException {
-        MapperService mapperService = createMapperService(getExpandedMapping("status", "size"));
+        MapperService mapperService = createMapperService(getExpandedMappingWithJustAvg("status", "size"));
         Set<CompositeMappedFieldType> compositeFieldTypes = mapperService.getCompositeFieldTypes();
         for (CompositeMappedFieldType type : compositeFieldTypes) {
             StarTreeMapper.StarTreeFieldType starTreeFieldType = (StarTreeMapper.StarTreeFieldType) type;
+            assertEquals(2, starTreeFieldType.getDimensions().size());
             assertEquals("@timestamp", starTreeFieldType.getDimensions().get(0).getField());
             assertTrue(starTreeFieldType.getDimensions().get(0) instanceof DateDimension);
             DateDimension dateDim = (DateDimension) starTreeFieldType.getDimensions().get(0);
@@ -65,8 +66,11 @@ public void testValidStarTree() throws IOException {
             );
             assertEquals(expectedTimeUnits, dateDim.getIntervals());
             assertEquals("status", starTreeFieldType.getDimensions().get(1).getField());
+            assertEquals(2, starTreeFieldType.getMetrics().size());
             assertEquals("size", starTreeFieldType.getMetrics().get(0).getField());
-            List<MetricStat> expectedMetrics = Arrays.asList(MetricStat.SUM, MetricStat.AVG);
+
+            // Assert COUNT and SUM gets added when AVG is defined
+            List<MetricStat> expectedMetrics = Arrays.asList(MetricStat.AVG, MetricStat.VALUE_COUNT, MetricStat.SUM);
             assertEquals(expectedMetrics, starTreeFieldType.getMetrics().get(0).getMetrics());
             assertEquals(100, starTreeFieldType.getStarTreeConfig().maxLeafDocs());
             assertEquals(StarTreeFieldConfiguration.StarTreeBuildMode.OFF_HEAP, starTreeFieldType.getStarTreeConfig().getBuildMode());
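
The reworked expectation encodes derived-metric expansion: a mapping that requests only avg ends up with AVG, VALUE_COUNT, and SUM, because AVG is computed from those two base metrics (MetricStat.AVG.getBaseMetrics() is asserted to be [VALUE_COUNT, SUM] later in this file). A sketch of that expansion under those assumptions; expandStats is a hypothetical name, not the mapper's actual method:

    static List<MetricStat> expandStats(List<MetricStat> requested) {
        List<MetricStat> expanded = new ArrayList<>(requested);
        for (MetricStat stat : requested) {
            // For AVG, getBaseMetrics() yields VALUE_COUNT and SUM.
            for (MetricStat base : stat.getBaseMetrics()) {
                if (!expanded.contains(base)) {
                    expanded.add(base);  // e.g. [AVG] -> [AVG, VALUE_COUNT, SUM]
                }
            }
        }
        return expanded;
    }
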
@@ -77,6 +81,67 @@ public void testValidStarTree() throws IOException {
         }
     }
+    public void testMetricsWithJustSum() throws IOException {
+        MapperService mapperService = createMapperService(getExpandedMappingWithJustSum("status", "size"));
+        Set<CompositeMappedFieldType> compositeFieldTypes = mapperService.getCompositeFieldTypes();
+        for (CompositeMappedFieldType type : compositeFieldTypes) {
+            StarTreeMapper.StarTreeFieldType starTreeFieldType = (StarTreeMapper.StarTreeFieldType) type;
+            assertEquals("@timestamp", starTreeFieldType.getDimensions().get(0).getField());
+            assertTrue(starTreeFieldType.getDimensions().get(0) instanceof DateDimension);
+            DateDimension dateDim = (DateDimension) starTreeFieldType.getDimensions().get(0);
+            List<Rounding.DateTimeUnit> expectedTimeUnits = Arrays.asList(
+                Rounding.DateTimeUnit.DAY_OF_MONTH,
+                Rounding.DateTimeUnit.MONTH_OF_YEAR
+            );
+            assertEquals(expectedTimeUnits, dateDim.getIntervals());
+            assertEquals("status", starTreeFieldType.getDimensions().get(1).getField());
+            assertEquals("size", starTreeFieldType.getMetrics().get(0).getField());
+
+            // Assert AVG gets added when both of its base metrics is already present
+            List<MetricStat> expectedMetrics = List.of(MetricStat.SUM);
+            assertEquals(expectedMetrics, starTreeFieldType.getMetrics().get(0).getMetrics());
+            assertEquals(100, starTreeFieldType.getStarTreeConfig().maxLeafDocs());
+            assertEquals(StarTreeFieldConfiguration.StarTreeBuildMode.OFF_HEAP, starTreeFieldType.getStarTreeConfig().getBuildMode());
+            assertEquals(
+                new HashSet<>(Arrays.asList("@timestamp", "status")),
+                starTreeFieldType.getStarTreeConfig().getSkipStarNodeCreationInDims()
+            );
+        }
+    }
+
+    public void testMetricsWithCountAndSum() throws IOException {
+        MapperService mapperService = createMapperService(getExpandedMappingWithSumAndCount("status", "size"));
+        Set<CompositeMappedFieldType> compositeFieldTypes = mapperService.getCompositeFieldTypes();
+        for (CompositeMappedFieldType type : compositeFieldTypes) {
+            StarTreeMapper.StarTreeFieldType starTreeFieldType = (StarTreeMapper.StarTreeFieldType) type;
+            assertEquals("@timestamp", starTreeFieldType.getDimensions().get(0).getField());
+            assertTrue(starTreeFieldType.getDimensions().get(0) instanceof DateDimension);
+            DateDimension dateDim = (DateDimension) starTreeFieldType.getDimensions().get(0);
+            List<Rounding.DateTimeUnit> expectedTimeUnits = Arrays.asList(
+                Rounding.DateTimeUnit.DAY_OF_MONTH,
+                Rounding.DateTimeUnit.MONTH_OF_YEAR
+            );
+            assertEquals(expectedTimeUnits, dateDim.getIntervals());
+            assertEquals("status", starTreeFieldType.getDimensions().get(1).getField());
+            assertEquals("size", starTreeFieldType.getMetrics().get(0).getField());
+
+            // Assert AVG gets added when both of its base metrics is already present
+            List<MetricStat> expectedMetrics = List.of(MetricStat.SUM, MetricStat.VALUE_COUNT, MetricStat.AVG);
+            assertEquals(expectedMetrics, starTreeFieldType.getMetrics().get(0).getMetrics());
+
+            Metric metric = starTreeFieldType.getMetrics().get(1);
+            assertEquals("_doc_count", metric.getField());
+            assertEquals(List.of(MetricStat.DOC_COUNT), metric.getMetrics());
+
+            assertEquals(100, starTreeFieldType.getStarTreeConfig().maxLeafDocs());
+            assertEquals(StarTreeFieldConfiguration.StarTreeBuildMode.OFF_HEAP, starTreeFieldType.getStarTreeConfig().getBuildMode());
+            assertEquals(
+                new HashSet<>(Arrays.asList("@timestamp", "status")),
+                starTreeFieldType.getStarTreeConfig().getSkipStarNodeCreationInDims()
+            );
+        }
+    }
+
     public void testValidStarTreeDefaults() throws IOException {
         MapperService mapperService = createMapperService(getMinMapping());
         Set<CompositeMappedFieldType> compositeFieldTypes = mapperService.getCompositeFieldTypes();
@@ -91,15 +156,17 @@ public void testValidStarTreeDefaults() throws IOException {
             );
             assertEquals(expectedTimeUnits, dateDim.getIntervals());
             assertEquals("status", starTreeFieldType.getDimensions().get(1).getField());
+            assertEquals(3, starTreeFieldType.getMetrics().size());
             assertEquals("status", starTreeFieldType.getMetrics().get(0).getField());
-            List<MetricStat> expectedMetrics = Arrays.asList(
-                MetricStat.AVG,
-                MetricStat.VALUE_COUNT,
-                MetricStat.SUM,
-                MetricStat.MAX,
-                MetricStat.MIN
-            );
+            List<MetricStat> expectedMetrics = Arrays.asList(MetricStat.VALUE_COUNT, MetricStat.SUM, MetricStat.AVG);
             assertEquals(expectedMetrics, starTreeFieldType.getMetrics().get(0).getMetrics());
+
+            assertEquals("metric_field", starTreeFieldType.getMetrics().get(1).getField());
+            expectedMetrics = Arrays.asList(MetricStat.VALUE_COUNT, MetricStat.SUM, MetricStat.AVG);
+            assertEquals(expectedMetrics, starTreeFieldType.getMetrics().get(1).getMetrics());
+            Metric metric = starTreeFieldType.getMetrics().get(2);
+            assertEquals("_doc_count", metric.getField());
+            assertEquals(List.of(MetricStat.DOC_COUNT), metric.getMetrics());
             assertEquals(10000, starTreeFieldType.getStarTreeConfig().maxLeafDocs());
             assertEquals(StarTreeFieldConfiguration.StarTreeBuildMode.OFF_HEAP, starTreeFieldType.getStarTreeConfig().getBuildMode());
             assertEquals(Collections.emptySet(), starTreeFieldType.getStarTreeConfig().getSkipStarNodeCreationInDims());
@@ -109,7 +176,7 @@ public void testValidStarTreeDefaults() throws IOException {
     public void testInvalidDim() {
         MapperParsingException ex = expectThrows(
             MapperParsingException.class,
-            () -> createMapperService(getExpandedMapping("invalid", "size"))
+            () -> createMapperService(getExpandedMappingWithJustAvg("invalid", "size"))
         );
         assertEquals("Failed to parse mapping [_doc]: unknown dimension field [invalid]", ex.getMessage());
     }
@@ -117,7 +184,7 @@ public void testInvalidDim() {
     public void testInvalidMetric() {
         MapperParsingException ex = expectThrows(
             MapperParsingException.class,
-            () -> createMapperService(getExpandedMapping("status", "invalid"))
+            () -> createMapperService(getExpandedMappingWithJustAvg("status", "invalid"))
         );
         assertEquals("Failed to parse mapping [_doc]: unknown metric field [invalid]", ex.getMessage());
     }
@@ -136,7 +203,7 @@ public void testNoMetrics() {
     public void testInvalidParam() {
         MapperParsingException ex = expectThrows(
             MapperParsingException.class,
-            () -> createMapperService(getInvalidMapping(false, false, false, false, true))
+            () -> createMapperService(getInvalidMapping(false, false, false, false, true, false))
         );
         assertEquals(
             "Failed to parse mapping [_doc]: Star tree mapping definition has unsupported parameters: [invalid : {invalid=invalid}]",
@@ -182,6 +249,14 @@ public void testInvalidMetricType() {
         );
     }
+    public void testInvalidMetricTypeWithDocCount() {
+        MapperParsingException ex = expectThrows(
+            MapperParsingException.class,
+            () -> createMapperService(getInvalidMapping(false, false, false, false, false, true))
+        );
+        assertEquals("Failed to parse mapping [_doc]: Invalid metric stat: _doc_count", ex.getMessage());
+    }
+
     public void testInvalidDimType() {
         MapperParsingException ex = expectThrows(
             MapperParsingException.class,
@@ -232,6 +307,9 @@ public void testMetric() {
         assertEquals(MetricStat.MIN, MetricStat.fromTypeName("min"));
         assertEquals(MetricStat.SUM, MetricStat.fromTypeName("sum"));
         assertEquals(MetricStat.AVG, MetricStat.fromTypeName("avg"));
+
+        assertEquals(List.of(MetricStat.VALUE_COUNT, MetricStat.SUM), MetricStat.AVG.getBaseMetrics());
+
         IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> MetricStat.fromTypeName("invalid"));
         assertEquals("Invalid metric stat: invalid", ex.getMessage());
     }
@@ -310,7 +388,7 @@ public void testStarTreeField() {
     }
     public void testValidations() throws IOException {
-        MapperService mapperService = createMapperService(getExpandedMapping("status", "size"));
+        MapperService mapperService = createMapperService(getExpandedMappingWithJustAvg("status", "size"));
         Settings settings = Settings.builder().put(CompositeIndexSettings.STAR_TREE_INDEX_ENABLED_SETTING.getKey(), true).build();
         CompositeIndexSettings enabledCompositeIndexSettings = new CompositeIndexSettings(
             settings,
@@ -370,7 +448,7 @@ public void testValidations() throws IOException {
         );
     }
-    private XContentBuilder getExpandedMapping(String dim, String metric) throws IOException {
+    private XContentBuilder getExpandedMappingWithJustAvg(String dim, String metric) throws IOException {
         return topMapping(b -> {
             b.startObject("composite");
             b.startObject("startree");
@@ -399,7 +477,6 @@ private XContentBuilder getExpandedMapping(String dim, String metric) throws IOE
             b.startObject();
             b.field("name", metric);
             b.startArray("stats");
-            b.value("sum");
             b.value("avg");
             b.endArray();
             b.endObject();
@@ -421,6 +498,107 @@ private XContentBuilder getExpandedMapping(String dim, String metric) throws IOE
         });
     }
+    private XContentBuilder getExpandedMappingWithJustSum(String dim, String metric) throws IOException {
+        return topMapping(b -> {
+            b.startObject("composite");
+            b.startObject("startree");
+            b.field("type", "star_tree");
+            b.startObject("config");
+            b.field("max_leaf_docs", 100);
+            b.startArray("skip_star_node_creation_for_dimensions");
+            {
+                b.value("@timestamp");
+                b.value("status");
+            }
+            b.endArray();
+            b.startArray("ordered_dimensions");
+            b.startObject();
+            b.field("name", "@timestamp");
+            b.startArray("calendar_intervals");
+            b.value("day");
+            b.value("month");
+            b.endArray();
+            b.endObject();
+            b.startObject();
+            b.field("name", dim);
+            b.endObject();
+            b.endArray();
+            b.startArray("metrics");
+            b.startObject();
+            b.field("name", metric);
+            b.startArray("stats");
+            b.value("sum");
+            b.endArray();
+            b.endObject();
+            b.endArray();
+            b.endObject();
+            b.endObject();
+            b.endObject();
+            b.startObject("properties");
+            b.startObject("@timestamp");
+            b.field("type", "date");
+            b.endObject();
+            b.startObject("status");
+            b.field("type", "integer");
+            b.endObject();
+            b.startObject("size");
+            b.field("type", "integer");
+            b.endObject();
+            b.endObject();
+        });
+    }
+
+    private XContentBuilder getExpandedMappingWithSumAndCount(String dim, String metric) throws IOException {
+        return topMapping(b -> {
+            b.startObject("composite");
+            b.startObject("startree");
+            b.field("type", "star_tree");
+            b.startObject("config");
+            b.field("max_leaf_docs", 100);
+            b.startArray("skip_star_node_creation_for_dimensions");
+            {
+                b.value("@timestamp");
+                b.value("status");
+            }
+            b.endArray();
+            b.startArray("ordered_dimensions");
+            b.startObject();
+            b.field("name", "@timestamp");
+            b.startArray("calendar_intervals");
+            b.value("day");
+            b.value("month");
+            b.endArray();
+            b.endObject();
+            b.startObject();
+            b.field("name", dim);
+            b.endObject();
+            b.endArray();
+            b.startArray("metrics");
+            b.startObject();
+            b.field("name", metric);
+            b.startArray("stats");
+            b.value("sum");
+            b.value("value_count");
+            b.endArray();
+            b.endObject();
+            b.endArray();
+            b.endObject();
+            b.endObject();
+            b.endObject();
+            b.startObject("properties");
+            b.startObject("@timestamp");
+            b.field("type", "date");
+            b.endObject();
+            b.startObject("status");
+            b.field("type", "integer");
+            b.endObject();
+            b.startObject("size");
+            b.field("type", "integer");
+            b.endObject();
+            b.endObject();
+        });
+    }
+
     private XContentBuilder getMinMapping() throws IOException {
         return getMinMapping(false, false, false, false);
     }
@@ -546,7 +724,8 @@ private XContentBuilder getInvalidMapping(
         boolean invalidSkipDims,
         boolean invalidDimType,
         boolean invalidMetricType,
-        boolean invalidParam
+        boolean invalidParam,
+        boolean invalidDocCountMetricType
     ) throws IOException {
         return topMapping(b -> {
             b.startObject("composite");
@@ -583,6 +762,12 @@ private XContentBuilder getInvalidMapping(
             b.endObject();
             b.startObject();
             b.field("name", "metric_field");
+            if (invalidDocCountMetricType) {
+                b.startArray("stats");
+                b.value("_doc_count");
+                b.value("avg");
+                b.endArray();
+            }
             b.endObject();
             b.endArray();
             b.endObject();
@@ -681,7 +866,7 @@ private XContentBuilder getInvalidMappingWithDv(
     private XContentBuilder getInvalidMapping(boolean singleDim, boolean invalidSkipDims, boolean invalidDimType, boolean invalidMetricType)
         throws IOException {
invalidDimType, invalidMetricType, false, false); } protected boolean supportsOrIgnoresBoost() { diff --git a/server/src/test/java/org/opensearch/index/query/DerivedFieldQueryTests.java b/server/src/test/java/org/opensearch/index/query/DerivedFieldQueryTests.java index ecad1291bed19..bed2d22125810 100644 --- a/server/src/test/java/org/opensearch/index/query/DerivedFieldQueryTests.java +++ b/server/src/test/java/org/opensearch/index/query/DerivedFieldQueryTests.java @@ -88,7 +88,7 @@ public void execute() { // Create DerivedFieldQuery DerivedFieldQuery derivedFieldQuery = new DerivedFieldQuery( new TermQuery(new Term("ip_from_raw_request", "247.37.0.0")), - valueFetcher, + () -> valueFetcher, searchLookup, Lucene.STANDARD_ANALYZER, indexableFieldFunction, @@ -157,7 +157,7 @@ public void execute() { // Create DerivedFieldQuery DerivedFieldQuery derivedFieldQuery = new DerivedFieldQuery( new TermQuery(new Term("ip_from_raw_request", "247.37.0.0")), - valueFetcher, + () -> valueFetcher, searchLookup, Lucene.STANDARD_ANALYZER, badIndexableFieldFunction, @@ -169,7 +169,7 @@ public void execute() { // set ignore_malformed as true, query should pass derivedFieldQuery = new DerivedFieldQuery( new TermQuery(new Term("ip_from_raw_request", "247.37.0.0")), - valueFetcher, + () -> valueFetcher, searchLookup, Lucene.STANDARD_ANALYZER, badIndexableFieldFunction, diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java index ec48032df4a15..a6db37285fe6f 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java @@ -25,23 +25,29 @@ import org.opensearch.common.UUIDs; import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.support.PlainBlobMetadata; +import org.opensearch.common.collect.Tuple; +import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.shard.IndexShardTestUtils; import org.opensearch.index.store.RemoteSegmentStoreDirectory; import org.opensearch.index.translog.transfer.TranslogTransferMetadata; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; import org.opensearch.test.OpenSearchTestCase; import java.math.BigInteger; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.UUID; import java.util.stream.Collectors; @@ -60,6 +66,7 @@ import static org.opensearch.index.store.RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX; import static org.opensearch.index.store.RemoteSegmentStoreDirectory.MetadataFilenameUtils.SEPARATOR; import static org.opensearch.index.translog.transfer.TranslogTransferMetadata.METADATA_SEPARATOR; +import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; import static 
org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; @@ -537,4 +544,541 @@ private Map getRemoteStoreNodeAttributes() { remoteStoreNodeAttributes.put(REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, "my-translog-repo-1"); return remoteStoreNodeAttributes; } + + private void setupRemotePinnedTimestampFeature(boolean enabled) { + RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings( + Settings.builder().put(CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED.getKey(), enabled).build(), + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ); + } + + public void testGetPinnedTimestampLockedFilesFeatureDisabled() { + setupRemotePinnedTimestampFeature(false); + // Pinned timestamps 800, 900, 1000, 2000 + // Metadata with timestamp 990, 995, 1000, 1001 + // Metadata timestamp 1000 <= Pinned Timestamp 1000 + // Metadata timestamp 1001 <= Pinned Timestamp 2000 + Map metadataFilePinnedTimestampCache = new HashMap<>(); + Tuple, Set> metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps( + List.of(990L, 995L, 1000L, 1001L), + Set.of(800L, 900L, 1000L, 2000L), + metadataFilePinnedTimestampCache + ); + Map metadataFiles = metadataAndLocks.v1(); + Set implicitLockedFiles = metadataAndLocks.v2(); + + assertEquals(0, implicitLockedFiles.size()); + assertEquals(0, metadataFilePinnedTimestampCache.size()); + } + + public void testGetPinnedTimestampLockedFilesWithEmptyMetadataFiles() { + setupRemotePinnedTimestampFeature(true); + List metadataFiles = Collections.emptyList(); + Set pinnedTimestampSet = new HashSet<>(Arrays.asList(1L, 2L, 3L)); + Set implicitLockedFiles = RemoteStoreUtils.getPinnedTimestampLockedFiles( + metadataFiles, + pinnedTimestampSet, + new HashMap<>(), + RemoteSegmentStoreDirectory.MetadataFilenameUtils::getTimestamp, + RemoteSegmentStoreDirectory.MetadataFilenameUtils::getNodeIdByPrimaryTermAndGen + ); + assertTrue(implicitLockedFiles.isEmpty()); + } + + public void testGetPinnedTimestampLockedFilesWithNoPinnedTimestamps() { + setupRemotePinnedTimestampFeature(true); + List metadataFiles = Arrays.asList("file1.txt", "file2.txt", "file3.txt"); + Set pinnedTimestampSet = Collections.emptySet(); + Set implicitLockedFiles = RemoteStoreUtils.getPinnedTimestampLockedFiles( + metadataFiles, + pinnedTimestampSet, + new HashMap<>(), + RemoteSegmentStoreDirectory.MetadataFilenameUtils::getTimestamp, + RemoteSegmentStoreDirectory.MetadataFilenameUtils::getNodeIdByPrimaryTermAndGen + ); + assertTrue(implicitLockedFiles.isEmpty()); + } + + public void testGetPinnedTimestampLockedFilesWithNullMetadataFiles() { + setupRemotePinnedTimestampFeature(true); + List metadataFiles = null; + Set pinnedTimestampSet = new HashSet<>(Arrays.asList(1L, 2L, 3L)); + Set implicitLockedFiles = RemoteStoreUtils.getPinnedTimestampLockedFiles( + metadataFiles, + pinnedTimestampSet, + new HashMap<>(), + RemoteSegmentStoreDirectory.MetadataFilenameUtils::getTimestamp, + RemoteSegmentStoreDirectory.MetadataFilenameUtils::getNodeIdByPrimaryTermAndGen + ); + assertTrue(implicitLockedFiles.isEmpty()); + } + + public void testGetPinnedTimestampLockedFilesWithNullPinnedTimestampSet() { + setupRemotePinnedTimestampFeature(true); + List metadataFiles = Arrays.asList("file1.txt", "file2.txt", "file3.txt"); + Set pinnedTimestampSet = null; + Set implicitLockedFiles = 
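+    // Flips the pinned-timestamp feature flag for a test; constructing RemoteStoreSettings registers the flag,
+    // so the local instance is intentionally unused.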
+    private void setupRemotePinnedTimestampFeature(boolean enabled) {
+        RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(
+            Settings.builder().put(CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED.getKey(), enabled).build(),
+            new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)
+        );
+    }
+
+    public void testGetPinnedTimestampLockedFilesFeatureDisabled() {
+        setupRemotePinnedTimestampFeature(false);
+        // Pinned timestamps 800, 900, 1000, 2000
+        // Metadata with timestamp 990, 995, 1000, 1001
+        // Metadata timestamp 1000 <= Pinned Timestamp 1000
+        // Metadata timestamp 1001 <= Pinned Timestamp 2000
+        Map<Long, String> metadataFilePinnedTimestampCache = new HashMap<>();
+        Tuple<Map<Long, String>, Set<String>> metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps(
+            List.of(990L, 995L, 1000L, 1001L),
+            Set.of(800L, 900L, 1000L, 2000L),
+            metadataFilePinnedTimestampCache
+        );
+        Map<Long, String> metadataFiles = metadataAndLocks.v1();
+        Set<String> implicitLockedFiles = metadataAndLocks.v2();
+
+        assertEquals(0, implicitLockedFiles.size());
+        assertEquals(0, metadataFilePinnedTimestampCache.size());
+    }
+
+    public void testGetPinnedTimestampLockedFilesWithEmptyMetadataFiles() {
+        setupRemotePinnedTimestampFeature(true);
+        List<String> metadataFiles = Collections.emptyList();
+        Set<Long> pinnedTimestampSet = new HashSet<>(Arrays.asList(1L, 2L, 3L));
+        Set<String> implicitLockedFiles = RemoteStoreUtils.getPinnedTimestampLockedFiles(
+            metadataFiles,
+            pinnedTimestampSet,
+            new HashMap<>(),
+            RemoteSegmentStoreDirectory.MetadataFilenameUtils::getTimestamp,
+            RemoteSegmentStoreDirectory.MetadataFilenameUtils::getNodeIdByPrimaryTermAndGen
+        );
+        assertTrue(implicitLockedFiles.isEmpty());
+    }
+
+    public void testGetPinnedTimestampLockedFilesWithNoPinnedTimestamps() {
+        setupRemotePinnedTimestampFeature(true);
+        List<String> metadataFiles = Arrays.asList("file1.txt", "file2.txt", "file3.txt");
+        Set<Long> pinnedTimestampSet = Collections.emptySet();
+        Set<String> implicitLockedFiles = RemoteStoreUtils.getPinnedTimestampLockedFiles(
+            metadataFiles,
+            pinnedTimestampSet,
+            new HashMap<>(),
+            RemoteSegmentStoreDirectory.MetadataFilenameUtils::getTimestamp,
+            RemoteSegmentStoreDirectory.MetadataFilenameUtils::getNodeIdByPrimaryTermAndGen
+        );
+        assertTrue(implicitLockedFiles.isEmpty());
+    }
+
+    public void testGetPinnedTimestampLockedFilesWithNullMetadataFiles() {
+        setupRemotePinnedTimestampFeature(true);
+        List<String> metadataFiles = null;
+        Set<Long> pinnedTimestampSet = new HashSet<>(Arrays.asList(1L, 2L, 3L));
+        Set<String> implicitLockedFiles = RemoteStoreUtils.getPinnedTimestampLockedFiles(
+            metadataFiles,
+            pinnedTimestampSet,
+            new HashMap<>(),
+            RemoteSegmentStoreDirectory.MetadataFilenameUtils::getTimestamp,
+            RemoteSegmentStoreDirectory.MetadataFilenameUtils::getNodeIdByPrimaryTermAndGen
+        );
+        assertTrue(implicitLockedFiles.isEmpty());
+    }
+
+    public void testGetPinnedTimestampLockedFilesWithNullPinnedTimestampSet() {
+        setupRemotePinnedTimestampFeature(true);
+        List<String> metadataFiles = Arrays.asList("file1.txt", "file2.txt", "file3.txt");
+        Set<Long> pinnedTimestampSet = null;
+        Set<String> implicitLockedFiles = RemoteStoreUtils.getPinnedTimestampLockedFiles(
+            metadataFiles,
+            pinnedTimestampSet,
+            new HashMap<>(),
+            RemoteSegmentStoreDirectory.MetadataFilenameUtils::getTimestamp,
+            RemoteSegmentStoreDirectory.MetadataFilenameUtils::getNodeIdByPrimaryTermAndGen
+        );
+        assertTrue(implicitLockedFiles.isEmpty());
+    }
+
+    private Tuple<Map<Long, String>, Set<String>> testGetPinnedTimestampLockedFilesWithPinnedTimestamps(
+        List<Long> metadataFileTimestamps,
+        Set<Long> pinnedTimestamps,
+        Map<Long, String> metadataFilePinnedTimestampCache
+    ) {
+        String metadataPrefix = "metadata__1__2__3__4__5__";
+        Map<Long, String> metadataFiles = new HashMap<>();
+        for (Long metadataFileTimestamp : metadataFileTimestamps) {
+            metadataFiles.put(metadataFileTimestamp, metadataPrefix + RemoteStoreUtils.invertLong(metadataFileTimestamp));
+        }
+        return new Tuple<>(
+            metadataFiles,
+            RemoteStoreUtils.getPinnedTimestampLockedFiles(
+                new ArrayList<>(metadataFiles.values()),
+                pinnedTimestamps,
+                metadataFilePinnedTimestampCache,
+                RemoteSegmentStoreDirectory.MetadataFilenameUtils::getTimestamp,
+                RemoteSegmentStoreDirectory.MetadataFilenameUtils::getNodeIdByPrimaryTermAndGen
+            )
+        );
+    }
+
+    private Tuple<Map<Long, String>, Set<String>> testGetPinnedTimestampLockedFilesWithPinnedTimestamps(
+        Map<Long, Long> metadataFileTimestampsPrimaryTermMap,
+        Set<Long> pinnedTimestamps,
+        Map<Long, String> metadataFilePinnedTimestampCache
+    ) {
+        setupRemotePinnedTimestampFeature(true);
+        Map<Long, String> metadataFiles = new HashMap<>();
+        for (Map.Entry<Long, Long> metadataFileTimestampPrimaryTerm : metadataFileTimestampsPrimaryTermMap.entrySet()) {
+            String primaryTerm = RemoteStoreUtils.invertLong(metadataFileTimestampPrimaryTerm.getValue());
+            String metadataPrefix = "metadata__" + primaryTerm + "__2__3__4__5__";
+            long metadataFileTimestamp = metadataFileTimestampPrimaryTerm.getKey();
+            metadataFiles.put(metadataFileTimestamp, metadataPrefix + RemoteStoreUtils.invertLong(metadataFileTimestamp));
+        }
+        return new Tuple<>(
+            metadataFiles,
+            RemoteStoreUtils.getPinnedTimestampLockedFiles(
+                new ArrayList<>(metadataFiles.values()),
+                pinnedTimestamps,
+                metadataFilePinnedTimestampCache,
+                RemoteSegmentStoreDirectory.MetadataFilenameUtils::getTimestamp,
+                RemoteSegmentStoreDirectory.MetadataFilenameUtils::getNodeIdByPrimaryTermAndGen
+            )
+        );
+    }
+
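+    // The cases below exercise the locking rule: the closest metadata file at or before each pinned timestamp is
+    // implicitly locked, and every match except the most recent one is cached, since the latest match can still
+    // change as newer metadata files arrive.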
+    public void testGetPinnedTimestampLockedFilesWithPinnedTimestamps() {
+        setupRemotePinnedTimestampFeature(true);
+
+        Map<Long, String> metadataFilePinnedTimestampCache = new HashMap<>();
+
+        // Pinned timestamps 800, 900
+        // Metadata with timestamp 990
+        // No metadata matches the timestamp
+        Tuple<Map<Long, String>, Set<String>> metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps(
+            List.of(990L),
+            Set.of(800L, 900L),
+            metadataFilePinnedTimestampCache
+        );
+        Map<Long, String> metadataFiles = metadataAndLocks.v1();
+        Set<String> implicitLockedFiles = metadataAndLocks.v2();
+
+        assertEquals(0, implicitLockedFiles.size());
+        assertEquals(0, metadataFilePinnedTimestampCache.size());
+
+        // Pinned timestamps 800, 900, 1000
+        // Metadata with timestamp 990
+        // Metadata timestamp 990 <= Pinned Timestamp 1000
+        metadataFilePinnedTimestampCache = new HashMap<>();
+        metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps(
+            List.of(990L),
+            Set.of(800L, 900L, 1000L),
+            metadataFilePinnedTimestampCache
+        );
+        metadataFiles = metadataAndLocks.v1();
+        implicitLockedFiles = metadataAndLocks.v2();
+
+        assertEquals(1, implicitLockedFiles.size());
+        assertTrue(implicitLockedFiles.contains(metadataFiles.get(990L)));
+        // This is still 0 as we don't cache the latest metadata file as it can change (explained in the next test case)
+        assertEquals(0, metadataFilePinnedTimestampCache.size());
+
+        // Pinned timestamps 800, 900, 1000
+        // Metadata with timestamp 990, 995
+        // Metadata timestamp 995 <= Pinned Timestamp 1000
+        metadataFilePinnedTimestampCache = new HashMap<>();
+        metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps(
+            List.of(990L, 995L),
+            Set.of(800L, 900L, 1000L),
+            metadataFilePinnedTimestampCache
+        );
+        metadataFiles = metadataAndLocks.v1();
+        implicitLockedFiles = metadataAndLocks.v2();
+
+        assertEquals(1, implicitLockedFiles.size());
+        assertTrue(implicitLockedFiles.contains(metadataFiles.get(995L)));
+        // This is still 0 as we don't cache the latest metadata file as it can change
+        assertEquals(0, metadataFilePinnedTimestampCache.size());
+
+        // Pinned timestamps 800, 900, 1000
+        // Metadata with timestamp 990, 995, 1000
+        // Metadata timestamp 1000 <= Pinned Timestamp 1000
+        metadataFilePinnedTimestampCache = new HashMap<>();
+        metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps(
+            List.of(990L, 995L, 1000L),
+            Set.of(800L, 900L, 1000L),
+            metadataFilePinnedTimestampCache
+        );
+        metadataFiles = metadataAndLocks.v1();
+        implicitLockedFiles = metadataAndLocks.v2();
+
+        assertEquals(1, implicitLockedFiles.size());
+        assertTrue(implicitLockedFiles.contains(metadataFiles.get(1000L)));
+        // This is still 0 as we don't cache the latest metadata file as it can change
+        assertEquals(0, metadataFilePinnedTimestampCache.size());
+
+        // Pinned timestamps 800, 900, 1000, 2000
+        // Metadata with timestamp 990, 995, 1000, 1001
+        // Metadata timestamp 1000 <= Pinned Timestamp 1000
+        // Metadata timestamp 1001 <= Pinned Timestamp 2000
+        metadataFilePinnedTimestampCache = new HashMap<>();
+        metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps(
+            List.of(990L, 995L, 1000L, 1001L),
+            Set.of(800L, 900L, 1000L, 2000L),
+            metadataFilePinnedTimestampCache
+        );
+        metadataFiles = metadataAndLocks.v1();
+        implicitLockedFiles = metadataAndLocks.v2();
+
+        assertEquals(2, implicitLockedFiles.size());
+        assertTrue(implicitLockedFiles.contains(metadataFiles.get(1000L)));
+        assertTrue(implicitLockedFiles.contains(metadataFiles.get(1001L)));
+        // Now we cache all the matches except the last one.
+        assertEquals(1, metadataFilePinnedTimestampCache.size());
+        assertEquals(metadataFiles.get(1000L), metadataFilePinnedTimestampCache.get(1000L));
+
+        // Pinned timestamps 800, 900, 1000, 2000, 3000, 4000, 5000
+        // Metadata with timestamp 990, 995, 1000, 1001
+        // Metadata timestamp 1000 <= Pinned Timestamp 1000
+        // Metadata timestamp 1001 <= Pinned Timestamp 2000
+        // Metadata timestamp 1001 <= Pinned Timestamp 3000
+        // Metadata timestamp 1001 <= Pinned Timestamp 4000
+        // Metadata timestamp 1001 <= Pinned Timestamp 5000
+        metadataFilePinnedTimestampCache = new HashMap<>();
+        metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps(
+            List.of(990L, 995L, 1000L, 1001L),
+            Set.of(800L, 900L, 1000L, 2000L, 3000L, 4000L, 5000L),
+            metadataFilePinnedTimestampCache
+        );
+        metadataFiles = metadataAndLocks.v1();
+        implicitLockedFiles = metadataAndLocks.v2();
+
+        assertEquals(2, implicitLockedFiles.size());
+        assertTrue(implicitLockedFiles.contains(metadataFiles.get(1000L)));
+        assertTrue(implicitLockedFiles.contains(metadataFiles.get(1001L)));
+        // Now we cache all the matches except the last one.
+        assertEquals(1, metadataFilePinnedTimestampCache.size());
+        assertEquals(metadataFiles.get(1000L), metadataFilePinnedTimestampCache.get(1000L));
+
+        // Pinned timestamps 800, 900, 1000, 2000, 3000, 4000, 5000
+        // Metadata with timestamp 990, 995, 1000, 1001, 1900, 2300
+        // Metadata timestamp 1000 <= Pinned Timestamp 1000
+        // Metadata timestamp 1900 <= Pinned Timestamp 2000
+        // Metadata timestamp 2300 <= Pinned Timestamp 3000
+        // Metadata timestamp 2300 <= Pinned Timestamp 4000
+        // Metadata timestamp 2300 <= Pinned Timestamp 5000
+        metadataFilePinnedTimestampCache = new HashMap<>();
+        metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps(
+            List.of(990L, 995L, 1000L, 1001L, 1900L, 2300L),
+            Set.of(800L, 900L, 1000L, 2000L, 3000L, 4000L, 5000L),
+            metadataFilePinnedTimestampCache
+        );
+        metadataFiles = metadataAndLocks.v1();
+        implicitLockedFiles = metadataAndLocks.v2();
+
+        assertEquals(3, implicitLockedFiles.size());
+        assertTrue(implicitLockedFiles.contains(metadataFiles.get(1000L)));
+        assertTrue(implicitLockedFiles.contains(metadataFiles.get(1900L)));
+        assertTrue(implicitLockedFiles.contains(metadataFiles.get(2300L)));
+        // Now we cache all the matches except the last one.
+        assertEquals(2, metadataFilePinnedTimestampCache.size());
+        assertEquals(metadataFiles.get(1000L), metadataFilePinnedTimestampCache.get(1000L));
+        assertEquals(metadataFiles.get(1900L), metadataFilePinnedTimestampCache.get(2000L));
+
+        // Pinned timestamps 2000, 3000, 4000, 5000
+        // Metadata with timestamp 990, 995, 1000, 1001, 1900, 2300
+        // Metadata timestamp 1900 <= Pinned Timestamp 2000
+        // Metadata timestamp 2300 <= Pinned Timestamp 3000
+        // Metadata timestamp 2300 <= Pinned Timestamp 4000
+        // Metadata timestamp 2300 <= Pinned Timestamp 5000
+        metadataFilePinnedTimestampCache = new HashMap<>();
+        metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps(
+            List.of(990L, 995L, 1000L, 1001L, 1900L, 2300L),
+            Set.of(2000L, 3000L, 4000L, 5000L),
+            metadataFilePinnedTimestampCache
+        );
+        metadataFiles = metadataAndLocks.v1();
+        implicitLockedFiles = metadataAndLocks.v2();
+
+        assertEquals(2, implicitLockedFiles.size());
+        assertTrue(implicitLockedFiles.contains(metadataFiles.get(1900L)));
+        assertTrue(implicitLockedFiles.contains(metadataFiles.get(2300L)));
+        // Now we cache all the matches except the last one.
+        assertEquals(1, metadataFilePinnedTimestampCache.size());
+        assertEquals(metadataFiles.get(1900L), metadataFilePinnedTimestampCache.get(2000L));
+
+        // Pinned timestamps 2000, 3000, 4000, 5000
+        // Metadata with timestamp 1001, 1900, 2300, 3000, 3001, 5500, 6000
+        // Metadata timestamp 1900 <= Pinned Timestamp 2000
+        // Metadata timestamp 3000 <= Pinned Timestamp 3000
+        // Metadata timestamp 3001 <= Pinned Timestamp 4000
+        // Metadata timestamp 3001 <= Pinned Timestamp 5000
+        metadataFilePinnedTimestampCache = new HashMap<>();
+        metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps(
+            List.of(1001L, 1900L, 2300L, 3000L, 3001L, 5500L, 6000L),
+            Set.of(2000L, 3000L, 4000L, 5000L),
+            metadataFilePinnedTimestampCache
+        );
+        metadataFiles = metadataAndLocks.v1();
+        implicitLockedFiles = metadataAndLocks.v2();
+
+        assertEquals(3, implicitLockedFiles.size());
+        assertTrue(implicitLockedFiles.contains(metadataFiles.get(1900L)));
+        assertTrue(implicitLockedFiles.contains(metadataFiles.get(3000L)));
+        assertTrue(implicitLockedFiles.contains(metadataFiles.get(3001L)));
+        // Now we cache all the matches except the last one.
+        assertEquals(4, metadataFilePinnedTimestampCache.size());
+        assertEquals(metadataFiles.get(1900L), metadataFilePinnedTimestampCache.get(2000L));
+        assertEquals(metadataFiles.get(3000L), metadataFilePinnedTimestampCache.get(3000L));
+        assertEquals(metadataFiles.get(3001L), metadataFilePinnedTimestampCache.get(4000L));
+        assertEquals(metadataFiles.get(3001L), metadataFilePinnedTimestampCache.get(5000L));
+
+        // Pinned timestamps 4000, 5000, 6000, 7000
+        // Metadata with timestamp 2300, 3000, 3001, 5500, 6000
+        // Metadata timestamp 3001 <= Pinned Timestamp 4000
+        // Metadata timestamp 3001 <= Pinned Timestamp 5000
+        // Metadata timestamp 6000 <= Pinned Timestamp 6000
+        // Metadata timestamp 6000 <= Pinned Timestamp 7000
+        metadataFilePinnedTimestampCache = new HashMap<>();
+        metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps(
+            List.of(2300L, 3000L, 3001L, 5500L, 6000L),
+            Set.of(4000L, 5000L, 6000L, 7000L),
+            metadataFilePinnedTimestampCache
+        );
+        metadataFiles = metadataAndLocks.v1();
+        implicitLockedFiles = metadataAndLocks.v2();
+
+        assertEquals(2, implicitLockedFiles.size());
+        assertTrue(implicitLockedFiles.contains(metadataFiles.get(3001L)));
+        assertTrue(implicitLockedFiles.contains(metadataFiles.get(6000L)));
+        // Now we cache all the matches except the last one.
+        assertEquals(2, metadataFilePinnedTimestampCache.size());
+        assertEquals(metadataFiles.get(3001L), metadataFilePinnedTimestampCache.get(4000L));
+        assertEquals(metadataFiles.get(3001L), metadataFilePinnedTimestampCache.get(5000L));
+    }
+
+    public void testGetPinnedTimestampLockedFilesWithPinnedTimestampsDifferentPrefix() {
+        setupRemotePinnedTimestampFeature(true);
+
+        Map<Long, String> metadataFilePinnedTimestampCache = new HashMap<>();
+
+        // Pinned timestamp 7000
+        // Primary Term - Timestamp in md file
+        // 6 - 7002
+        // 6 - 6998
+        // 5 - 6995
+        // 5 - 6990
+        Tuple<Map<Long, String>, Set<String>> metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps(
+            Map.of(7002L, 6L, 6998L, 6L, 6995L, 5L, 6990L, 5L),
+            Set.of(4000L, 5000L, 6000L, 7000L),
+            metadataFilePinnedTimestampCache
+        );
+        Map<Long, String> metadataFiles = metadataAndLocks.v1();
+        Set<String> implicitLockedFiles = metadataAndLocks.v2();
+
+        assertEquals(1, implicitLockedFiles.size());
+        assertTrue(implicitLockedFiles.contains(metadataFiles.get(6998L)));
+        // Now we cache all the matches except the last one.
+        assertEquals(1, metadataFilePinnedTimestampCache.size());
+        assertEquals(metadataFiles.get(6998L), metadataFilePinnedTimestampCache.get(7000L));
+
+        // Pinned timestamp 7000
+        // Primary Term - Timestamp in md file
+        // 6 - 7002
+        // 5 - 6998
+        // 5 - 6995
+        // 5 - 6990
+        metadataFilePinnedTimestampCache = new HashMap<>();
+        metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps(
+            Map.of(7002L, 6L, 6998L, 5L, 6995L, 5L, 6990L, 5L),
+            Set.of(4000L, 5000L, 6000L, 7000L),
+            metadataFilePinnedTimestampCache
+        );
+        metadataFiles = metadataAndLocks.v1();
+        implicitLockedFiles = metadataAndLocks.v2();
+
+        assertEquals(1, implicitLockedFiles.size());
+        assertTrue(implicitLockedFiles.contains(metadataFiles.get(6998L)));
+        // Now we cache all the matches except the last one.
+        assertEquals(1, metadataFilePinnedTimestampCache.size());
+        assertEquals(metadataFiles.get(6998L), metadataFilePinnedTimestampCache.get(7000L));
+
+        // Pinned timestamp 7000
+        // Primary Term - Timestamp in md file
+        // 6 - 7002
+        // 6 - 6998
+        // 5 - 7001
+        // 5 - 6990
+        metadataFilePinnedTimestampCache = new HashMap<>();
+        metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps(
+            Map.of(7002L, 6L, 6998L, 6L, 7001L, 5L, 6990L, 5L),
+            Set.of(4000L, 5000L, 6000L, 7000L),
+            metadataFilePinnedTimestampCache
+        );
+        metadataFiles = metadataAndLocks.v1();
+        implicitLockedFiles = metadataAndLocks.v2();
+
+        assertEquals(1, implicitLockedFiles.size());
+        assertTrue(implicitLockedFiles.contains(metadataFiles.get(6998L)));
+        // Now we cache all the matches except the last one.
+        assertEquals(1, metadataFilePinnedTimestampCache.size());
+        assertEquals(metadataFiles.get(6998L), metadataFilePinnedTimestampCache.get(7000L));
+
+        // Pinned timestamp 7000
+        // Primary Term - Timestamp in md file
+        // 6 - 7002
+        // 5 - 7005
+        // 5 - 6990
+        metadataFilePinnedTimestampCache = new HashMap<>();
+        metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps(
+            Map.of(7002L, 6L, 7005L, 5L, 6990L, 5L),
+            Set.of(4000L, 5000L, 6000L, 7000L),
+            metadataFilePinnedTimestampCache
+        );
+        metadataFiles = metadataAndLocks.v1();
+        implicitLockedFiles = metadataAndLocks.v2();
+
+        assertEquals(1, implicitLockedFiles.size());
+        assertTrue(implicitLockedFiles.contains(metadataFiles.get(6990L)));
+        // Now we cache all the matches except the last one.
+        assertEquals(1, metadataFilePinnedTimestampCache.size());
+        assertEquals(metadataFiles.get(6990L), metadataFilePinnedTimestampCache.get(7000L));
+
+        // Pinned timestamp 7000
+        // Primary Term - Timestamp in md file
+        // 6 - 6999
+        // 5 - 7005
+        // 5 - 6990
+        metadataFilePinnedTimestampCache = new HashMap<>();
+        metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps(
+            Map.of(6999L, 6L, 7005L, 5L, 6990L, 5L),
+            Set.of(4000L, 5000L, 6000L, 7000L),
+            metadataFilePinnedTimestampCache
+        );
+        metadataFiles = metadataAndLocks.v1();
+        implicitLockedFiles = metadataAndLocks.v2();
+
+        assertEquals(1, implicitLockedFiles.size());
+        assertTrue(implicitLockedFiles.contains(metadataFiles.get(6999L)));
+        // Now we cache all the matches except the last one.
+        assertEquals(0, metadataFilePinnedTimestampCache.size());
+    }
+
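+    // With the feature disabled, age-based filtering is a no-op and the input list is returned unchanged.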
+    public void testFilterOutMetadataFilesBasedOnAgeFeatureDisabled() {
+        setupRemotePinnedTimestampFeature(false);
+        List<String> metadataFiles = new ArrayList<>();
+
+        for (int i = 0; i < randomIntBetween(5, 10); i++) {
+            metadataFiles.add((System.currentTimeMillis() - randomIntBetween(-150000, 150000)) + "_file" + i + ".txt");
+        }
+
+        List<String> result = RemoteStoreUtils.filterOutMetadataFilesBasedOnAge(
+            metadataFiles,
+            file -> Long.valueOf(file.split("_")[0]),
+            System.currentTimeMillis()
+        );
+        assertEquals(metadataFiles, result);
+    }
+
+    public void testFilterOutMetadataFilesBasedOnAge_AllFilesOldEnough() {
+        setupRemotePinnedTimestampFeature(true);
+
+        List<String> metadataFiles = Arrays.asList(
+            (System.currentTimeMillis() - 150000) + "_file1.txt",
+            (System.currentTimeMillis() - 300000) + "_file2.txt",
+            (System.currentTimeMillis() - 450000) + "_file3.txt"
+        );
+
+        List<String> result = RemoteStoreUtils.filterOutMetadataFilesBasedOnAge(
+            metadataFiles,
+            file -> Long.valueOf(file.split("_")[0]),
+            System.currentTimeMillis()
+        );
+        assertEquals(metadataFiles, result);
+    }
+
+    public void testFilterOutMetadataFilesBasedOnAge_SomeFilesTooNew() {
+        setupRemotePinnedTimestampFeature(true);
+
+        String file1 = (System.currentTimeMillis() - 150000) + "_file1.txt";
+        String file2 = (System.currentTimeMillis() - 300000) + "_file2.txt";
+        String file3 = (System.currentTimeMillis() + 450000) + "_file3.txt";
+
+        List<String> metadataFiles = Arrays.asList(file1, file2, file3);
+
+        List<String> result = RemoteStoreUtils.filterOutMetadataFilesBasedOnAge(
+            metadataFiles,
+            file -> Long.valueOf(file.split("_")[0]),
+            System.currentTimeMillis()
+        );
+        List<String> expected = Arrays.asList(file1, file2);
+        assertEquals(expected, result);
+    }
+
+    public void testFilterOutMetadataFilesBasedOnAge_AllFilesTooNew() {
+        setupRemotePinnedTimestampFeature(true);
+
+        String file1 = (System.currentTimeMillis() + 150000) + "_file1.txt";
+        String file2 = (System.currentTimeMillis() + 300000) + "_file2.txt";
+        String file3 = (System.currentTimeMillis() + 450000) + "_file3.txt";
+
+        List<String> metadataFiles = Arrays.asList(file1, file2, file3);
+
+        List<String> result = RemoteStoreUtils.filterOutMetadataFilesBasedOnAge(
+            metadataFiles,
+            file -> Long.valueOf(file.split("_")[0]),
+            System.currentTimeMillis()
+        );
+        assertTrue(result.isEmpty());
+    }
+
+    public void testFilterOutMetadataFilesBasedOnAge_EmptyInputList() {
+        setupRemotePinnedTimestampFeature(true);
+
+        List<String> metadataFiles = Arrays.asList();
+
+        List<String> result = RemoteStoreUtils.filterOutMetadataFilesBasedOnAge(
+            metadataFiles,
+            file -> Long.valueOf(file.split("_")[0]),
+            System.currentTimeMillis()
+        );
+        assertTrue(result.isEmpty());
+    }
+
+    public void testIsPinnedTimestampStateStaleFeatureDisabled() {
+        setupRemotePinnedTimestampFeature(false);
+        assertFalse(RemoteStoreUtils.isPinnedTimestampStateStale());
+    }
+
+    public void testIsPinnedTimestampStateStaleFeatureEnabled() {
+        setupRemotePinnedTimestampFeature(true);
+        assertTrue(RemoteStoreUtils.isPinnedTimestampStateStale());
+    }
 }
diff --git a/server/src/test/java/org/opensearch/index/store/BaseRemoteSegmentStoreDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/BaseRemoteSegmentStoreDirectoryTests.java
index ff9b62a341deb..2c55d26261fe0 100644
--- a/server/src/test/java/org/opensearch/index/store/BaseRemoteSegmentStoreDirectoryTests.java
+++ b/server/src/test/java/org/opensearch/index/store/BaseRemoteSegmentStoreDirectoryTests.java
@@ -43,16 +43,9 @@ public class BaseRemoteSegmentStoreDirectoryTests extends IndexShardTestCase {
     protected SegmentInfos segmentInfos;
     protected ThreadPool threadPool;
 
-    protected final String metadataFilename = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(
-        12,
-        23,
-        34,
-        1,
-        1,
-        "node-1"
-    );
+    protected String metadataFilename = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(12, 23, 34, 1, 1, "node-1");
 
-    protected final String metadataFilenameDup = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(
+    protected String metadataFilenameDup = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(
         12,
         23,
         34,
@@ -60,30 +53,9 @@ public class BaseRemoteSegmentStoreDirectoryTests extends IndexShardTestCase {
         1,
         "node-2"
     );
-    protected final String metadataFilename2 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(
-        12,
-        13,
-        34,
-        1,
-        1,
-        "node-1"
-    );
-    protected final String metadataFilename3 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(
-        10,
-        38,
-        34,
-        1,
-        1,
-        "node-1"
-    );
-    protected final String metadataFilename4 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(
-        10,
-        36,
-        34,
-        1,
-        1,
-        "node-1"
-    );
+    protected String metadataFilename2 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(12, 13, 34, 1, 1, "node-1");
+    protected String metadataFilename3 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(10, 38, 34, 1, 1, "node-1");
+    protected String metadataFilename4 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(10, 36, 34, 1, 1, "node-1");
 
     public void setupRemoteSegmentStoreDirectory() throws IOException {
         remoteDataDirectory = mock(RemoteDirectory.class);
diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryWithPinnedTimestampTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryWithPinnedTimestampTests.java
new file mode 100644
index 0000000000000..b4f93d706bb1e
--- /dev/null
+++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryWithPinnedTimestampTests.java
@@ -0,0 +1,292 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index.store;
+
+import org.apache.lucene.util.Version;
+import org.opensearch.common.UUIDs;
+import org.opensearch.common.blobstore.BlobMetadata;
+import org.opensearch.common.blobstore.BlobPath;
+import org.opensearch.common.blobstore.support.PlainBlobMetadata;
+import org.opensearch.common.settings.ClusterSettings;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.core.action.ActionListener;
+import org.opensearch.gateway.remote.model.RemotePinnedTimestamps;
+import org.opensearch.gateway.remote.model.RemoteStorePinnedTimestampsBlobStore;
+import org.opensearch.index.remote.RemoteStoreUtils;
+import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata;
+import org.opensearch.index.translog.transfer.BlobStoreTransferService;
+import org.opensearch.indices.RemoteStoreSettings;
+import org.opensearch.node.Node;
+import org.opensearch.node.remotestore.RemoteStoreNodeAttribute;
+import org.opensearch.node.remotestore.RemoteStorePinnedTimestampService;
+import org.opensearch.repositories.RepositoriesService;
+import org.opensearch.repositories.blobstore.BlobStoreRepository;
+import org.junit.Before;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Supplier;
+import java.util.stream.Collectors;
+
+import org.mockito.Mockito;
+
+import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED;
+import static org.opensearch.test.RemoteStoreTestUtils.createMetadataFileBytes;
+import static org.hamcrest.CoreMatchers.is;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.anyInt;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+public class RemoteSegmentStoreDirectoryWithPinnedTimestampTests extends RemoteSegmentStoreDirectoryTests {
+
+    Runnable updatePinnedTimestampTask;
+    BlobStoreTransferService blobStoreTransferService;
+    RemoteStorePinnedTimestampsBlobStore remoteStorePinnedTimestampsBlobStore;
+    RemoteStorePinnedTimestampService remoteStorePinnedTimestampServiceSpy;
+
+    @Before
+    public void setupPinnedTimestamp() throws IOException {
+        RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(
+            Settings.builder().put(CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED.getKey(), true).build(),
+            new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)
+        );
+
+        Supplier<RepositoriesService> repositoriesServiceSupplier = mock(Supplier.class);
+        Settings settings = Settings.builder()
+            .put(Node.NODE_ATTRIBUTES.getKey() + RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, "remote-repo")
+            .build();
+        RepositoriesService repositoriesService = mock(RepositoriesService.class);
+        when(repositoriesServiceSupplier.get()).thenReturn(repositoriesService);
+        BlobStoreRepository blobStoreRepository = mock(BlobStoreRepository.class);
+        when(repositoriesService.repository("remote-repo")).thenReturn(blobStoreRepository);
+
+        when(threadPool.schedule(any(), any(), any())).then(invocationOnMock -> {
+            updatePinnedTimestampTask = invocationOnMock.getArgument(0);
+            updatePinnedTimestampTask.run();
+            return null;
+        }).then(subsequentInvocationsOnMock -> null);
+
+        RemoteStorePinnedTimestampService remoteStorePinnedTimestampService = new RemoteStorePinnedTimestampService(
+            repositoriesServiceSupplier,
+            settings,
+            threadPool,
+            clusterService
+        );
+        remoteStorePinnedTimestampServiceSpy = Mockito.spy(remoteStorePinnedTimestampService);
+
+        remoteStorePinnedTimestampsBlobStore = mock(RemoteStorePinnedTimestampsBlobStore.class);
+        blobStoreTransferService = mock(BlobStoreTransferService.class);
+        when(remoteStorePinnedTimestampServiceSpy.pinnedTimestampsBlobStore()).thenReturn(remoteStorePinnedTimestampsBlobStore);
+        when(remoteStorePinnedTimestampServiceSpy.blobStoreTransferService()).thenReturn(blobStoreTransferService);
+
+        doAnswer(invocationOnMock -> {
+            ActionListener<List<BlobMetadata>> actionListener = invocationOnMock.getArgument(3);
+            actionListener.onResponse(new ArrayList<>());
+            return null;
+        }).when(blobStoreTransferService).listAllInSortedOrder(any(), any(), eq(1), any());
+
+        remoteStorePinnedTimestampServiceSpy.start();
+
+        metadataWithOlderTimestamp();
+    }
+
+    private void metadataWithOlderTimestamp() {
+        metadataFilename = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(
+            12,
+            23,
+            34,
+            1,
+            1,
+            "node-1",
+            System.currentTimeMillis() - 300000
+        );
+        metadataFilename2 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(
+            12,
+            13,
+            34,
+            1,
+            1,
+            "node-1",
+            System.currentTimeMillis() - 400000
+        );
+        metadataFilename3 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(
+            10,
+            38,
+            34,
+            1,
+            1,
+            "node-1",
+            System.currentTimeMillis() - 500000
+        );
+        metadataFilename4 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(
+            10,
+            36,
+            34,
+            1,
+            1,
+            "node-1",
+            System.currentTimeMillis() - 600000
+        );
+    }
+
+    public void testInitializeToSpecificTimestampNoMetadataFiles() throws IOException {
+        when(
+            remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder(
+                RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX,
+                Integer.MAX_VALUE
+            )
+        ).thenReturn(new ArrayList<>());
+        assertNull(remoteSegmentStoreDirectory.initializeToSpecificTimestamp(1234L));
+    }
+
+    public void testInitializeToSpecificTimestampNoMdMatchingTimestamp() throws IOException {
+        String metadataPrefix = "metadata__1__2__3__4__5__";
+        List<String> metadataFiles = new ArrayList<>();
+        metadataFiles.add(metadataPrefix + RemoteStoreUtils.invertLong(2000));
+        metadataFiles.add(metadataPrefix + RemoteStoreUtils.invertLong(3000));
+        metadataFiles.add(metadataPrefix + RemoteStoreUtils.invertLong(4000));
+
+        when(
+            remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder(
+                RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX,
+                Integer.MAX_VALUE
+            )
+        ).thenReturn(metadataFiles);
+        assertNull(remoteSegmentStoreDirectory.initializeToSpecificTimestamp(1234L));
+    }
+
+    public void testInitializeToSpecificTimestampMatchingMdFile() throws IOException {
+        String metadataPrefix = "metadata__1__2__3__4__5__";
+        List<String> metadataFiles = new ArrayList<>();
+        metadataFiles.add(metadataPrefix + RemoteStoreUtils.invertLong(1000));
+        metadataFiles.add(metadataPrefix + RemoteStoreUtils.invertLong(2000));
+        metadataFiles.add(metadataPrefix + RemoteStoreUtils.invertLong(3000));
+
+        Map<String, String> metadata = new HashMap<>();
+        metadata.put("_0.cfe", "_0.cfe::_0.cfe__" + UUIDs.base64UUID() + "::1234::512::" + Version.LATEST.major);
+        metadata.put("_0.cfs", "_0.cfs::_0.cfs__" + UUIDs.base64UUID() + "::2345::1024::" + Version.LATEST.major);
+
+        when(
+            remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder(
+                RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX,
+                Integer.MAX_VALUE
+            )
+        ).thenReturn(metadataFiles);
+        when(remoteMetadataDirectory.getBlobStream(metadataPrefix + RemoteStoreUtils.invertLong(1000))).thenReturn(
+            createMetadataFileBytes(metadata, indexShard.getLatestReplicationCheckpoint(), segmentInfos)
+        );
+
+        RemoteSegmentMetadata remoteSegmentMetadata = remoteSegmentStoreDirectory.initializeToSpecificTimestamp(1234L);
+        assertNotNull(remoteSegmentMetadata);
+        Map<String, RemoteSegmentStoreDirectory.UploadedSegmentMetadata> uploadedSegments = remoteSegmentStoreDirectory
+            .getSegmentsUploadedToRemoteStore();
+        assertEquals(2, uploadedSegments.size());
+        assertTrue(uploadedSegments.containsKey("_0.cfe"));
+        assertTrue(uploadedSegments.containsKey("_0.cfs"));
+    }
+
+    public void testDeleteStaleCommitsNoPinnedTimestampMdFilesLatest() throws Exception {
+        metadataFilename = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(
+            12,
+            23,
+            34,
+            1,
+            1,
+            "node-1",
+            System.currentTimeMillis()
+        );
+        metadataFilename2 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(
+            12,
+            13,
+            34,
+            1,
+            1,
+            "node-1",
+            System.currentTimeMillis()
+        );
+        metadataFilename3 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(
+            10,
+            38,
+            34,
+            1,
+            1,
+            "node-1",
+            System.currentTimeMillis()
+        );
+
+        when(
+            remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder(
+                eq(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX),
+                anyInt()
+            )
+        ).thenReturn(List.of(metadataFilename, metadataFilename2, metadataFilename3));
+
+        populateMetadata();
+        remoteSegmentStoreDirectory.init();
+
+        // populateMetadata() adds stub to return 3 metadata files
+        // We are passing lastNMetadataFilesToKeep=2 here so that the oldest 1 metadata file will be deleted
+        // But as the oldest metadata file's timestamp is within the time threshold since the last successful fetch,
+        // GC will skip deleting any data or metadata files.
+        remoteSegmentStoreDirectory.deleteStaleSegmentsAsync(2);
+
+        assertBusy(() -> assertThat(remoteSegmentStoreDirectory.canDeleteStaleCommits.get(), is(true)));
+        verify(remoteDataDirectory, times(0)).deleteFile(any());
+        verify(remoteMetadataDirectory, times(0)).deleteFile(any());
+    }
+
+    public void testDeleteStaleCommitsPinnedTimestampMdFile() throws Exception {
+        when(
+            remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder(
+                eq(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX),
+                anyInt()
+            )
+        ).thenReturn(List.of(metadataFilename, metadataFilename2, metadataFilename3));
+
+        doAnswer(invocationOnMock -> {
+            ActionListener<List<BlobMetadata>> actionListener = invocationOnMock.getArgument(3);
+            actionListener.onResponse(List.of(new PlainBlobMetadata("pinned_timestamp_123", 1000)));
+            return null;
+        }).when(blobStoreTransferService).listAllInSortedOrder(any(), any(), eq(1), any());
+
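+        // Pinning a timestamp 10ms after metadataFilename2's creation implicitly locks metadataFilename2,
+        // so GC must not delete it.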
+        long pinnedTimestampMatchingMetadataFilename2 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getTimestamp(metadataFilename2)
+            + 10;
+        when(remoteStorePinnedTimestampsBlobStore.read(any())).thenReturn(
+            new RemotePinnedTimestamps.PinnedTimestamps(Map.of(pinnedTimestampMatchingMetadataFilename2, List.of("xyz")))
+        );
+        when(remoteStorePinnedTimestampsBlobStore.getBlobPathForUpload(any())).thenReturn(new BlobPath());
+
+        final Map<String, Map<String, String>> metadataFilenameContentMapping = populateMetadata();
+        final List<String> filesToBeDeleted = metadataFilenameContentMapping.get(metadataFilename3)
+            .values()
+            .stream()
+            .map(metadata -> metadata.split(RemoteSegmentStoreDirectory.UploadedSegmentMetadata.SEPARATOR)[1])
+            .collect(Collectors.toList());
+
+        updatePinnedTimestampTask.run();
+
+        remoteSegmentStoreDirectory.init();
+
+        // populateMetadata() adds stub to return 3 metadata files
+        // We pass lastNMetadataFilesToKeep=1 so that the 2 oldest metadata files become deletion candidates;
+        // metadataFilename2 is protected by the pinned timestamp, leaving only metadataFilename3 to be deleted
+        remoteSegmentStoreDirectory.deleteStaleSegmentsAsync(1);
+
+        for (final String file : filesToBeDeleted) {
+            verify(remoteDataDirectory).deleteFile(file);
+        }
+        assertBusy(() -> assertThat(remoteSegmentStoreDirectory.canDeleteStaleCommits.get(), is(true)));
+        verify(remoteMetadataDirectory).deleteFile(metadataFilename3);
+        verify(remoteMetadataDirectory, times(0)).deleteFile(metadataFilename2);
+    }
+}
diff --git a/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java b/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java
index 6757dbc184961..b5350a39e8599 100644
--- a/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java
+++ b/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java
@@ -31,12 +31,15 @@
 
 package org.opensearch.indices;
 
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.search.similarities.BM25Similarity;
 import org.apache.lucene.search.similarities.Similarity;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.opensearch.Version;
 import org.opensearch.action.admin.indices.stats.CommonStatsFlags;
 import org.opensearch.action.admin.indices.stats.IndexShardStats;
+import org.opensearch.action.search.SearchType;
 import org.opensearch.cluster.ClusterName;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.metadata.IndexGraveyard;
@@ -44,6 +47,7 @@
 import org.opensearch.cluster.metadata.Metadata;
 import org.opensearch.cluster.service.ClusterService;
 import org.opensearch.common.UUIDs;
+import org.opensearch.common.lucene.index.OpenSearchDirectoryReader.DelegatingCacheHelper;
 import org.opensearch.common.settings.Setting;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.unit.TimeValue;
@@ -76,8 +80,11 @@
 import org.opensearch.plugins.EnginePlugin;
 import org.opensearch.plugins.MapperPlugin;
 import org.opensearch.plugins.Plugin;
+import org.opensearch.search.internal.ContextIndexSearcher;
+import org.opensearch.search.internal.ShardSearchRequest;
 import org.opensearch.test.IndexSettingsModule;
 import org.opensearch.test.OpenSearchSingleNodeTestCase;
+import org.opensearch.test.TestSearchContext;
 import org.opensearch.test.hamcrest.RegexMatcher;
 
 import java.io.IOException;
@@ -627,4 +634,32 @@ public void testClusterRemoteTranslogBufferIntervalDefault() {
             indicesService.getRemoteStoreSettings().getClusterRemoteTranslogBufferInterval()
         );
     }
+
+    public void testDirectoryReaderWithoutDelegatingCacheHelperNotCacheable() throws IOException {
+        IndicesService indicesService = getIndicesService();
+        final IndexService indexService = createIndex("test");
+        ShardSearchRequest request = mock(ShardSearchRequest.class);
+        when(request.requestCache()).thenReturn(true);
+
+        TestSearchContext context = new TestSearchContext(indexService.getBigArrays(), indexService) {
+            @Override
+            public SearchType searchType() {
+                return SearchType.QUERY_THEN_FETCH;
+            }
+        };
+
+        ContextIndexSearcher searcher = mock(ContextIndexSearcher.class);
+        context.setSearcher(searcher);
+        DirectoryReader reader = mock(DirectoryReader.class);
+        when(searcher.getDirectoryReader()).thenReturn(reader);
+        when(searcher.getIndexReader()).thenReturn(reader);
+        IndexReader.CacheHelper notDelegatingCacheHelper = mock(IndexReader.CacheHelper.class);
+        DelegatingCacheHelper delegatingCacheHelper = mock(DelegatingCacheHelper.class);
+
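+        // canCache should hold only when the reader exposes OpenSearch's DelegatingCacheHelper;
+        // a plain CacheHelper means the shard request cache must be skipped.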
+        for (boolean useDelegatingCacheHelper : new boolean[] { true, false }) {
+            IndexReader.CacheHelper cacheHelper = useDelegatingCacheHelper ? delegatingCacheHelper : notDelegatingCacheHelper;
+            when(reader.getReaderCacheHelper()).thenReturn(cacheHelper);
+            assertEquals(useDelegatingCacheHelper, indicesService.canCache(request, context));
+        }
+    }
 }
diff --git a/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java b/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java
index 43ebb86fd5342..cb0a36c870d07 100644
--- a/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java
+++ b/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java
@@ -51,6 +51,7 @@
 import org.opensearch.cluster.node.DiscoveryNodes;
 import org.opensearch.cluster.service.ClusterApplierService;
 import org.opensearch.cluster.service.ClusterService;
+import org.opensearch.common.Priority;
 import org.opensearch.common.UUIDs;
 import org.opensearch.common.blobstore.BlobPath;
 import org.opensearch.common.blobstore.BlobStore;
@@ -669,6 +670,7 @@ public void finalizeSnapshot(
             SnapshotInfo snapshotInfo,
             Version repositoryMetaVersion,
             Function<ClusterState, ClusterState> stateTransformer,
+            Priority repositoryUpdatePriority,
             ActionListener<RepositoryData> listener
         ) {
             listener.onResponse(null);
diff --git a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java
index e4e83f2453fa2..7fc987dcfa9bb 100644
--- a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java
+++ b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java
@@ -42,6 +42,7 @@
 import org.opensearch.cluster.routing.ShardRouting;
 import org.opensearch.cluster.routing.ShardRoutingHelper;
 import org.opensearch.cluster.service.ClusterService;
+import org.opensearch.common.Priority;
 import org.opensearch.common.UUIDs;
 import org.opensearch.common.settings.ClusterSettings;
 import org.opensearch.common.settings.Settings;
@@ -213,10 +214,12 @@ public void testSnapshotWithConflictingName() throws Exception {
                     Collections.emptyList(),
                     true,
                     Collections.emptyMap(),
-                    false
+                    false,
+                    0
                 ),
                 Version.CURRENT,
                 Function.identity(),
+                Priority.NORMAL,
                 f
             )
         );
diff --git a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java
index bd47507da4863..eabac37bf3434 100644
--- a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java
+++ b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java
@@ -39,6 +39,7 @@
 import org.opensearch.client.Client;
 import org.opensearch.cluster.metadata.RepositoryMetadata;
 import org.opensearch.cluster.service.ClusterService;
+import org.opensearch.common.Priority;
 import org.opensearch.common.UUIDs;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.core.common.unit.ByteSizeUnit;
@@ -224,7 +225,7 @@ public void testRepositoryDataConcurrentModificationNotAllowed() {
         RepositoryData repositoryData = generateRandomRepoData();
         final long startingGeneration = repositoryData.getGenId();
         final PlainActionFuture<RepositoryData> future1 = PlainActionFuture.newFuture();
-        repository.writeIndexGen(repositoryData, startingGeneration, Version.CURRENT, Function.identity(), future1);
+        repository.writeIndexGen(repositoryData, startingGeneration, Version.CURRENT, Function.identity(), Priority.NORMAL, future1);
 
         // write repo data again to index generational file, errors because we already wrote to the
         // N+1 generation from which this repository data instance was created
@@ -295,7 +296,7 @@ public void testFsRepositoryCompressDeprecatedIgnored() {
     private static void writeIndexGen(BlobStoreRepository repository, RepositoryData repositoryData, long generation) throws Exception {
         PlainActionFuture.get(
-            f -> repository.writeIndexGen(repositoryData, generation, Version.CURRENT, Function.identity(), f)
+            f -> repository.writeIndexGen(repositoryData, generation, Version.CURRENT, Function.identity(), Priority.NORMAL, f)
         );
     }
diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotInfoTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotInfoTests.java
index 850a392c9619c..684a8dd36fccc 100644
--- a/server/src/test/java/org/opensearch/snapshots/SnapshotInfoTests.java
+++ b/server/src/test/java/org/opensearch/snapshots/SnapshotInfoTests.java
@@ -86,7 +86,8 @@ protected SnapshotInfo createTestInstance() {
             shardFailures,
             includeGlobalState,
             userMetadata,
-            remoteStoreIndexShallowCopy
+            remoteStoreIndexShallowCopy,
+            0
         );
     }
@@ -114,7 +115,8 @@ protected SnapshotInfo mutateInstance(SnapshotInfo instance) {
                     instance.shardFailures(),
                     instance.includeGlobalState(),
                     instance.userMetadata(),
-                    instance.isRemoteStoreIndexShallowCopyEnabled()
+                    instance.isRemoteStoreIndexShallowCopyEnabled(),
+                    0
                 );
             case 1:
                 int indicesSize = randomValueOtherThan(instance.indices().size(), () -> randomIntBetween(1, 10));
@@ -132,7 +134,8 @@ protected SnapshotInfo mutateInstance(SnapshotInfo instance) {
                     instance.shardFailures(),
                     instance.includeGlobalState(),
                     instance.userMetadata(),
-                    instance.isRemoteStoreIndexShallowCopyEnabled()
+                    instance.isRemoteStoreIndexShallowCopyEnabled(),
+                    0
                 );
             case 2:
                 return new SnapshotInfo(
@@ -146,7 +149,8 @@ protected SnapshotInfo mutateInstance(SnapshotInfo instance) {
                    instance.shardFailures(),
                     instance.includeGlobalState(),
                     instance.userMetadata(),
-                    instance.isRemoteStoreIndexShallowCopyEnabled()
+                    instance.isRemoteStoreIndexShallowCopyEnabled(),
+                    0
                 );
             case 3:
                 return new SnapshotInfo(
@@ -160,7 +164,8 @@ protected SnapshotInfo mutateInstance(SnapshotInfo instance) {
                     instance.shardFailures(),
                     instance.includeGlobalState(),
                     instance.userMetadata(),
-                    instance.isRemoteStoreIndexShallowCopyEnabled()
+                    instance.isRemoteStoreIndexShallowCopyEnabled(),
+                    0
                 );
             case 4:
                 return new SnapshotInfo(
@@ -174,7 +179,8 @@ protected SnapshotInfo mutateInstance(SnapshotInfo instance) {
                     instance.shardFailures(),
                     instance.includeGlobalState(),
                     instance.userMetadata(),
-                    instance.isRemoteStoreIndexShallowCopyEnabled()
+                    instance.isRemoteStoreIndexShallowCopyEnabled(),
+                    0
                 );
             case 5:
                 int totalShards = randomValueOtherThan(instance.totalShards(), () -> randomIntBetween(0, 100));
@@ -200,7 +206,8 @@ protected SnapshotInfo mutateInstance(SnapshotInfo instance) {
                     shardFailures,
                     instance.includeGlobalState(),
                     instance.userMetadata(),
-                    instance.isRemoteStoreIndexShallowCopyEnabled()
+                    instance.isRemoteStoreIndexShallowCopyEnabled(),
+                    0
                 );
             case 6:
                 return new SnapshotInfo(
@@ -214,7 +221,8 @@ protected SnapshotInfo mutateInstance(SnapshotInfo instance) {
                     instance.shardFailures(),
                     Boolean.FALSE.equals(instance.includeGlobalState()),
                     instance.userMetadata(),
-                    instance.isRemoteStoreIndexShallowCopyEnabled()
+                    instance.isRemoteStoreIndexShallowCopyEnabled(),
+                    0
                );
            case 7:
                 return new SnapshotInfo(
@@ -228,7 +236,8 @@ protected SnapshotInfo mutateInstance(SnapshotInfo instance) {
                     instance.shardFailures(),
                     instance.includeGlobalState(),
                     randomValueOtherThan(instance.userMetadata(), SnapshotInfoTests::randomUserMetadata),
-                    instance.isRemoteStoreIndexShallowCopyEnabled()
+                    instance.isRemoteStoreIndexShallowCopyEnabled(),
+                    0
                 );
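+            // cases 8 and 9 below also exercise the new trailing pinnedTimestamp argument with a non-default value (123456)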
             case 8:
                 List<String> dataStreams = randomValueOtherThan(
@@ -246,7 +255,8 @@ protected SnapshotInfo mutateInstance(SnapshotInfo instance) {
                     instance.shardFailures(),
                     instance.includeGlobalState(),
                     instance.userMetadata(),
-                    instance.isRemoteStoreIndexShallowCopyEnabled()
+                    instance.isRemoteStoreIndexShallowCopyEnabled(),
+                    123456
                 );
             case 9:
                 return new SnapshotInfo(
@@ -260,7 +270,8 @@ protected SnapshotInfo mutateInstance(SnapshotInfo instance) {
                     instance.shardFailures(),
                     instance.includeGlobalState(),
                     instance.userMetadata(),
-                    Boolean.FALSE.equals(instance.isRemoteStoreIndexShallowCopyEnabled())
+                    Boolean.FALSE.equals(instance.isRemoteStoreIndexShallowCopyEnabled()),
+                    123456
                 );
             default:
                 throw new IllegalArgumentException("invalid randomization case");
diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java
index 769dfeb37ff8d..e27223cea0778 100644
--- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java
+++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java
@@ -2379,6 +2379,7 @@ public void onFailure(final Exception e) {
                     clusterService,
                     threadPool,
                     snapshotsService,
+                    repositoriesService,
                     actionFilters,
                     indexNameExpressionResolver
                 )
diff --git a/server/src/test/java/org/opensearch/snapshots/mockstore/MockEventuallyConsistentRepositoryTests.java b/server/src/test/java/org/opensearch/snapshots/mockstore/MockEventuallyConsistentRepositoryTests.java
index 43dde7281fb2d..06a486b3cb997 100644
--- a/server/src/test/java/org/opensearch/snapshots/mockstore/MockEventuallyConsistentRepositoryTests.java
+++ b/server/src/test/java/org/opensearch/snapshots/mockstore/MockEventuallyConsistentRepositoryTests.java
@@ -36,6 +36,7 @@
 import org.opensearch.cluster.metadata.Metadata;
 import org.opensearch.cluster.metadata.RepositoryMetadata;
 import org.opensearch.cluster.service.ClusterService;
+import org.opensearch.common.Priority;
 import org.opensearch.common.UUIDs;
 import org.opensearch.common.blobstore.BlobContainer;
 import org.opensearch.common.settings.ClusterSettings;
@@ -233,10 +234,12 @@ public void testOverwriteSnapshotInfoBlob() throws Exception {
                     Collections.emptyList(),
                     true,
                     Collections.emptyMap(),
-                    false
+                    false,
+                    0
                 ),
                 Version.CURRENT,
                 Function.identity(),
+                Priority.NORMAL,
                 f
             ));
@@ -259,10 +262,12 @@ public void testOverwriteSnapshotInfoBlob() throws Exception {
                         Collections.emptyList(),
                         true,
                         Collections.emptyMap(),
-                        false
+                        false,
+                        0
                     ),
                     Version.CURRENT,
                     Function.identity(),
+                    Priority.NORMAL,
                     f
                 )
            )
@@ -287,10 +292,12 @@ public void testOverwriteSnapshotInfoBlob() throws Exception {
                     Collections.emptyList(),
                     true,
                     Collections.emptyMap(),
-                    false
+                    false,
+                    0
                 ),
                 Version.CURRENT,
                 Function.identity(),
+                Priority.NORMAL,
                 f
             )
         );
diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle
index 411509dfe5acc..b5cd12ef0c11f 100644
--- a/test/fixtures/hdfs-fixture/build.gradle
+++ b/test/fixtures/hdfs-fixture/build.gradle
@@ -55,7 +55,7 @@ dependencies {
     exclude group: 'com.nimbusds'
     exclude module: "commons-configuration2"
   }
-  api "dnsjava:dnsjava:3.6.0"
+  api "dnsjava:dnsjava:3.6.1"
  api "org.codehaus.jettison:jettison:${versions.jettison}"
  api "org.apache.commons:commons-compress:${versions.commonscompress}"
  api "commons-codec:commons-codec:${versions.commonscodec}"
diff --git a/test/framework/src/main/java/org/opensearch/index/shard/RestoreOnlyRepository.java b/test/framework/src/main/java/org/opensearch/index/shard/RestoreOnlyRepository.java
index be2f895301396..1ca1a6969ab2d 100644
--- a/test/framework/src/main/java/org/opensearch/index/shard/RestoreOnlyRepository.java
+++ b/test/framework/src/main/java/org/opensearch/index/shard/RestoreOnlyRepository.java
@@ -39,6 +39,7 @@
 import org.opensearch.cluster.metadata.Metadata;
 import org.opensearch.cluster.metadata.RepositoryMetadata;
 import org.opensearch.cluster.node.DiscoveryNode;
+import org.opensearch.common.Priority;
 import org.opensearch.common.lifecycle.AbstractLifecycleComponent;
 import org.opensearch.core.action.ActionListener;
 import org.opensearch.core.index.shard.ShardId;
@@ -125,6 +126,7 @@ public void finalizeSnapshot(
         SnapshotInfo snapshotInfo,
         Version repositoryMetaVersion,
         Function<ClusterState, ClusterState> stateTransformer,
+        Priority repositoryUpdatePriority,
         ActionListener<RepositoryData> listener
     ) {
         listener.onResponse(null);
diff --git a/test/framework/src/main/java/org/opensearch/repositories/blobstore/BlobStoreTestUtil.java b/test/framework/src/main/java/org/opensearch/repositories/blobstore/BlobStoreTestUtil.java
index 32f445bf24a41..187c30be0044e 100644
--- a/test/framework/src/main/java/org/opensearch/repositories/blobstore/BlobStoreTestUtil.java
+++ b/test/framework/src/main/java/org/opensearch/repositories/blobstore/BlobStoreTestUtil.java
@@ -298,23 +298,25 @@ private static void assertSnapshotUUIDs(BlobStoreRepository repository, Reposito
                 .stream()
                 .noneMatch(shardFailure -> shardFailure.index().equals(index) && shardFailure.shardId() == shardId)) {
                 final Map<String, BlobMetadata> shardPathContents = shardContainer.listBlobs();
-
-                assertTrue(
-                    shardPathContents.containsKey(
-                        String.format(Locale.ROOT, BlobStoreRepository.SHALLOW_SNAPSHOT_NAME_FORMAT, snapshotId.getUUID())
-                    )
-                        || shardPathContents.containsKey(
-                            String.format(Locale.ROOT, BlobStoreRepository.SNAPSHOT_NAME_FORMAT, snapshotId.getUUID())
+                if (snapshotInfo.getPinnedTimestamp() == 0) {
+                    assertTrue(
+                        shardPathContents.containsKey(
+                            String.format(Locale.ROOT, BlobStoreRepository.SHALLOW_SNAPSHOT_NAME_FORMAT, snapshotId.getUUID())
                         )
-                );
+                            || shardPathContents.containsKey(
+                                String.format(Locale.ROOT, BlobStoreRepository.SNAPSHOT_NAME_FORMAT, snapshotId.getUUID())
+                            )
+                    );
+
+                    assertThat(
+                        shardPathContents.keySet()
+                            .stream()
+                            .filter(name -> name.startsWith(BlobStoreRepository.INDEX_FILE_PREFIX))
+                            .count(),
+                        lessThanOrEqualTo(2L)
+                    );
+                }
 
-                assertThat(
-                    shardPathContents.keySet()
-                        .stream()
-                        .filter(name -> name.startsWith(BlobStoreRepository.INDEX_FILE_PREFIX))
-                        .count(),
-                    lessThanOrEqualTo(2L)
-                );
             }
         }
     }
diff --git a/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java b/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java
index ce76914882150..ec9cd5b64353e 100644
--- a/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java
@@ -48,6 +48,7 @@
 import org.opensearch.cluster.metadata.RepositoryMetadata;
 import org.opensearch.cluster.routing.allocation.decider.EnableAllocationDecider;
 import org.opensearch.cluster.service.ClusterService;
+import org.opensearch.common.Priority;
 import org.opensearch.common.UUIDs;
 import org.opensearch.common.action.ActionFuture;
 import org.opensearch.common.blobstore.BlobContainer;
@@ -612,7 +613,8 @@ protected void addBwCFailedSnapshot(String repoName, String snapshotName, Map
 get(
             f -> repo.finalizeSnapshot(
@@ -622,6 +624,7 @@ protected void addBwCFailedSnapshot(String repoName, String snapshotName, Map