From 74ffdb6dba6b8e307a2c491cc2498750e5548b8e Mon Sep 17 00:00:00 2001
From: Jay Deng
Date: Mon, 9 Oct 2023 21:03:14 -0700
Subject: [PATCH 01/33] Avoid size=0 for terminate_after tests (#10436)

Signed-off-by: Jay Deng
---
 .../search/simple/SimpleSearchIT.java         | 29 ++++++++++++++-----
 1 file changed, 22 insertions(+), 7 deletions(-)

diff --git a/server/src/internalClusterTest/java/org/opensearch/search/simple/SimpleSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/simple/SimpleSearchIT.java
index 95b36311f6b8b..7aae41d939cac 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/simple/SimpleSearchIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/simple/SimpleSearchIT.java
@@ -283,10 +283,9 @@ public void testSimpleDateRange() throws Exception {
        assertHitCount(searchResponse, 2L);
    }

-   public void testSimpleTerminateAfterCount() throws Exception {
+   public void doTestSimpleTerminateAfterCountWithSize(int size, int max) throws Exception {
        prepareCreate("test").setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0)).get();
        ensureGreen();
-       int max = randomIntBetween(3, 29);

        List docbuilders = new ArrayList<>(max);
        for (int i = 1; i <= max; i++) {
@@ -299,9 +298,7 @@ public void testSimpleTerminateAfterCount() throws Exception {
        refresh();

        SearchResponse searchResponse;
-       int size;
        for (int i = 1; i < max; i++) {
-           size = randomIntBetween(0, max);
            searchResponse = client().prepareSearch("test")
                .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(max))
                .setTerminateAfter(i)
@@ -322,7 +319,18 @@ public void testSimpleTerminateAfterCount() throws Exception {
        assertFalse(searchResponse.isTerminatedEarly());
    }

-   public void testSimpleTerminateAfterTrackTotalHitsUpTo() throws Exception {
+   @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/10435")
+   public void testSimpleTerminateAfterCountSize0() throws Exception {
+       int max = randomIntBetween(3, 29);
+       doTestSimpleTerminateAfterCountWithSize(0, max);
+   }
+
+   public void testSimpleTerminateAfterCountRandomSize() throws Exception {
+       int max = randomIntBetween(3, 29);
+       doTestSimpleTerminateAfterCountWithSize(randomIntBetween(1, max), max);
+   }
+
+   public void doTestSimpleTerminateAfterTrackTotalHitsUpTo(int size) throws Exception {
        prepareCreate("test").setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0)).get();
        ensureGreen();
        int numDocs = 29;
@@ -337,8 +345,6 @@ public void testSimpleTerminateAfterTrackTotalHitsUpTo() throws Exception {
        ensureGreen();
        refresh();

-       // size=0 is a special case where topDocsCollector is not added
-       int size = randomIntBetween(0, 1);
        SearchResponse searchResponse;
        searchResponse = client().prepareSearch("test")
            .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(numDocs))
@@ -399,6 +405,15 @@ public void testSimpleTerminateAfterTrackTotalHitsUpTo() throws Exception {
        assertEquals(GREATER_THAN_OR_EQUAL_TO, searchResponse.getHits().getTotalHits().relation);
    }

+   @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/10435")
+   public void testSimpleTerminateAfterTrackTotalHitsUpToSize0() throws Exception {
+       doTestSimpleTerminateAfterTrackTotalHitsUpTo(0);
+   }
+
+   public void testSimpleTerminateAfterTrackTotalHitsUpToRandomSize() throws Exception {
+       doTestSimpleTerminateAfterTrackTotalHitsUpTo(randomIntBetween(1, 29));
+   }
+
    public void testSimpleIndexSortEarlyTerminate() throws Exception {
prepareCreate("test").setSettings( Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0).put("index.sort.field", "rank") From 8e5e54e87f6f86a2425a5c767e208767ba3df14c Mon Sep 17 00:00:00 2001 From: Sachin Kale Date: Tue, 10 Oct 2023 16:16:36 +0530 Subject: [PATCH 02/33] Use file length from metadata while downloading segments from remote store (#10399) Signed-off-by: Sachin Kale --- .../index/store/RemoteDirectory.java | 13 +++-- .../store/RemoteSegmentStoreDirectory.java | 3 +- .../index/store/RemoteDirectoryTests.java | 54 +++++++++---------- .../RemoteSegmentStoreDirectoryTests.java | 5 +- 4 files changed, 39 insertions(+), 36 deletions(-) diff --git a/server/src/main/java/org/opensearch/index/store/RemoteDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteDirectory.java index eb75c39532d71..70a88f6159764 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteDirectory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteDirectory.java @@ -39,7 +39,6 @@ import java.util.Collection; import java.util.Collections; import java.util.List; -import java.util.Map; import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicReference; @@ -193,10 +192,14 @@ public IndexOutput createOutput(String name, IOContext context) { */ @Override public IndexInput openInput(String name, IOContext context) throws IOException { + return openInput(name, fileLength(name), context); + } + + public IndexInput openInput(String name, long fileLength, IOContext context) throws IOException { InputStream inputStream = null; try { inputStream = blobContainer.readBlob(name); - return new RemoteIndexInput(name, downloadRateLimiter.apply(inputStream), fileLength(name)); + return new RemoteIndexInput(name, downloadRateLimiter.apply(inputStream), fileLength); } catch (Exception e) { // Incase the RemoteIndexInput creation fails, close the input stream to avoid file handler leak. 
            if (inputStream != null) {
@@ -230,9 +233,9 @@ public void close() throws IOException {
    @Override
    public long fileLength(String name) throws IOException {
        // ToDo: Instead of calling remote store each time, keep a cache with segment metadata
-       Map metadata = blobContainer.listBlobsByPrefix(name);
-       if (metadata.containsKey(name)) {
-           return metadata.get(name).length();
+       List metadata = blobContainer.listBlobsByPrefixInSortedOrder(name, 1, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC);
+       if (metadata.size() == 1 && metadata.get(0).name().equals(name)) {
+           return metadata.get(0).length();
        }
        throw new NoSuchFileException(name);
    }
diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java
index dc9706306b408..c0b302c68c0ea 100644
--- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java
+++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java
@@ -434,8 +434,9 @@ public IndexOutput createOutput(String name, IOContext context) throws IOExcepti
    @Override
    public IndexInput openInput(String name, IOContext context) throws IOException {
        String remoteFilename = getExistingRemoteFilename(name);
+       long fileLength = fileLength(name);
        if (remoteFilename != null) {
-           return remoteDataDirectory.openInput(remoteFilename, context);
+           return remoteDataDirectory.openInput(remoteFilename, fileLength, context);
        } else {
            throw new NoSuchFileException(name);
        }
diff --git a/server/src/test/java/org/opensearch/index/store/RemoteDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteDirectoryTests.java
index 3740abc57b02d..9e38e1749d434 100644
--- a/server/src/test/java/org/opensearch/index/store/RemoteDirectoryTests.java
+++ b/server/src/test/java/org/opensearch/index/store/RemoteDirectoryTests.java
@@ -29,7 +29,6 @@
 import java.nio.file.NoSuchFileException;
 import java.util.Collection;
 import java.util.Collections;
-import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -41,11 +40,13 @@
 import org.mockito.Mockito;

+import static org.opensearch.common.blobstore.BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.eq;
 import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;

@@ -204,13 +205,29 @@ public void testCreateOutput() {
    public void testOpenInput() throws IOException {
        InputStream mockInputStream = mock(InputStream.class);
        when(blobContainer.readBlob("segment_1")).thenReturn(mockInputStream);
-       Map fileInfo = new HashMap<>();
-       fileInfo.put("segment_1", new PlainBlobMetadata("segment_1", 100));
-       when(blobContainer.listBlobsByPrefix("segment_1")).thenReturn(fileInfo);
+
+       BlobMetadata blobMetadata = new PlainBlobMetadata("segment_1", 100);
+
+       when(blobContainer.listBlobsByPrefixInSortedOrder("segment_1", 1, LEXICOGRAPHIC)).thenReturn(List.of(blobMetadata));

        IndexInput indexInput = remoteDirectory.openInput("segment_1", IOContext.DEFAULT);
        assertTrue(indexInput instanceof RemoteIndexInput);
        assertEquals(100, indexInput.length());
+       verify(blobContainer).listBlobsByPrefixInSortedOrder("segment_1", 1, LEXICOGRAPHIC);
+   }
+
+   public void testOpenInputWithLength() throws IOException {
+       InputStream mockInputStream = mock(InputStream.class);
+       when(blobContainer.readBlob("segment_1")).thenReturn(mockInputStream);
+
+       BlobMetadata blobMetadata = new PlainBlobMetadata("segment_1", 100);
+
+       when(blobContainer.listBlobsByPrefixInSortedOrder("segment_1", 1, LEXICOGRAPHIC)).thenReturn(List.of(blobMetadata));
+
+       IndexInput indexInput = remoteDirectory.openInput("segment_1", 100, IOContext.DEFAULT);
+       assertTrue(indexInput instanceof RemoteIndexInput);
+       assertEquals(100, indexInput.length());
+       verify(blobContainer, times(0)).listBlobsByPrefixInSortedOrder("segment_1", 1, LEXICOGRAPHIC);
    }

    public void testOpenInputIOException() throws IOException {
@@ -228,9 +245,8 @@ public void testOpenInputNoSuchFileException() throws IOException {
    }

    public void testFileLength() throws IOException {
-       Map fileInfo = new HashMap<>();
-       fileInfo.put("segment_1", new PlainBlobMetadata("segment_1", 100));
-       when(blobContainer.listBlobsByPrefix("segment_1")).thenReturn(fileInfo);
+       BlobMetadata blobMetadata = new PlainBlobMetadata("segment_1", 100);
+       when(blobContainer.listBlobsByPrefixInSortedOrder("segment_1", 1, LEXICOGRAPHIC)).thenReturn(List.of(blobMetadata));

        assertEquals(100, remoteDirectory.fileLength("segment_1"));
    }
@@ -246,13 +262,7 @@ public void testListFilesByPrefixInLexicographicOrder() throws IOException {
            LatchedActionListener<List<BlobMetadata>> latchedActionListener = invocation.getArgument(3);
            latchedActionListener.onResponse(List.of(new PlainBlobMetadata("metadata_1", 1)));
            return null;
-       }).when(blobContainer)
-           .listBlobsByPrefixInSortedOrder(
-               eq("metadata"),
-               eq(1),
-               eq(BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC),
-               any(ActionListener.class)
-           );
+       }).when(blobContainer).listBlobsByPrefixInSortedOrder(eq("metadata"), eq(1), eq(LEXICOGRAPHIC), any(ActionListener.class));

        assertEquals(List.of("metadata_1"), remoteDirectory.listFilesByPrefixInLexicographicOrder("metadata", 1));
    }
@@ -262,13 +272,7 @@ public void testListFilesByPrefixInLexicographicOrderEmpty() throws IOException
            LatchedActionListener<List<BlobMetadata>> latchedActionListener = invocation.getArgument(3);
            latchedActionListener.onResponse(List.of());
            return null;
-       }).when(blobContainer)
-           .listBlobsByPrefixInSortedOrder(
-               eq("metadata"),
-               eq(1),
-               eq(BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC),
-               any(ActionListener.class)
-           );
+       }).when(blobContainer).listBlobsByPrefixInSortedOrder(eq("metadata"), eq(1), eq(LEXICOGRAPHIC), any(ActionListener.class));

        assertEquals(List.of(), remoteDirectory.listFilesByPrefixInLexicographicOrder("metadata", 1));
    }
@@ -278,13 +282,7 @@ public void testListFilesByPrefixInLexicographicOrderException() {
            LatchedActionListener<List<BlobMetadata>> latchedActionListener = invocation.getArgument(3);
            latchedActionListener.onFailure(new IOException("Error"));
            return null;
-       }).when(blobContainer)
-           .listBlobsByPrefixInSortedOrder(
-               eq("metadata"),
-               eq(1),
-               eq(BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC),
-               any(ActionListener.class)
-           );
+       }).when(blobContainer).listBlobsByPrefixInSortedOrder(eq("metadata"), eq(1), eq(LEXICOGRAPHIC), any(ActionListener.class));

        assertThrows(IOException.class, () -> remoteDirectory.listFilesByPrefixInLexicographicOrder("metadata", 1));
    }
diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java
index 0f44d5c3b2f53..3e45699ef36eb 100644
--- a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java
+++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java
@@ -71,6 +71,7 @@
 import static org.opensearch.test.RemoteStoreTestUtils.createMetadataFileBytes;
 import static org.opensearch.test.RemoteStoreTestUtils.getDummyMetadata;
 import static org.hamcrest.CoreMatchers.is;
+import static org.mockito.ArgumentMatchers.anyLong;
 import static org.mockito.ArgumentMatchers.anyString;
 import static org.mockito.ArgumentMatchers.contains;
 import static org.mockito.Mockito.any;
@@ -391,7 +392,7 @@ public void testOpenInput() throws IOException {
        remoteSegmentStoreDirectory.init();

        IndexInput indexInput = mock(IndexInput.class);
-       when(remoteDataDirectory.openInput(startsWith("_0.si"), eq(IOContext.DEFAULT))).thenReturn(indexInput);
+       when(remoteDataDirectory.openInput(startsWith("_0.si"), anyLong(), eq(IOContext.DEFAULT))).thenReturn(indexInput);

        assertEquals(indexInput, remoteSegmentStoreDirectory.openInput("_0.si", IOContext.DEFAULT));
    }
@@ -404,7 +405,7 @@ public void testOpenInputException() throws IOException {
        populateMetadata();
        remoteSegmentStoreDirectory.init();

-       when(remoteDataDirectory.openInput(startsWith("_0.si"), eq(IOContext.DEFAULT))).thenThrow(new IOException("Error"));
+       when(remoteDataDirectory.openInput(startsWith("_0.si"), anyLong(), eq(IOContext.DEFAULT))).thenThrow(new IOException("Error"));

        assertThrows(IOException.class, () -> remoteSegmentStoreDirectory.openInput("_0.si", IOContext.DEFAULT));
    }

From 506065c91f055cc1f925180afc1005c6989b7418 Mon Sep 17 00:00:00 2001
From: Andrew Ross
Date: Tue, 10 Oct 2023 08:54:50 -0500
Subject: [PATCH 03/33] Fix ScalingThreadPoolTest to handle new pool (#10427)

This was a bad use of randomization. The test is super fast so it can
be run against every scaling thread pool every time.
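For reference, randomizedtesting's @ParametersFactory runs the entire test
class once per returned parameter set, so every scaling pool is exercised on
every run instead of one randomly chosen pool per run. A minimal sketch of the
pattern (illustrative only; the class and pool names here are placeholders,
and the real factory in the diff below derives the names from
ThreadPool.THREAD_POOL_TYPES):

    @ParametersFactory
    public static Collection<Object[]> parameters() {
        // each Object[] becomes one constructor call, i.e. one full run of the test class
        return Arrays.asList(new Object[] { "generic" }, new Object[] { "snapshot" });
    }

    public ExampleScalingPoolTests(String threadPoolName) {
        this.threadPoolName = threadPoolName; // pool under test for this instance
    }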
Signed-off-by: Andrew Ross
---
 .../threadpool/ScalingThreadPoolTests.java    | 23 ++++++++++++++++---
 1 file changed, 20 insertions(+), 3 deletions(-)

diff --git a/server/src/test/java/org/opensearch/threadpool/ScalingThreadPoolTests.java b/server/src/test/java/org/opensearch/threadpool/ScalingThreadPoolTests.java
index 92bd15d818bca..ba2d4b8c247bb 100644
--- a/server/src/test/java/org/opensearch/threadpool/ScalingThreadPoolTests.java
+++ b/server/src/test/java/org/opensearch/threadpool/ScalingThreadPoolTests.java
@@ -32,10 +32,13 @@

 package org.opensearch.threadpool;

+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
 import org.opensearch.common.settings.ClusterSettings;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.util.concurrent.OpenSearchThreadPoolExecutor;

+import java.util.Collection;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.concurrent.CountDownLatch;
@@ -43,14 +46,29 @@
 import java.util.concurrent.TimeUnit;
 import java.util.function.BiConsumer;
 import java.util.function.Function;
+import java.util.stream.Collectors;

 import static org.hamcrest.CoreMatchers.instanceOf;
 import static org.hamcrest.Matchers.equalTo;

 public class ScalingThreadPoolTests extends OpenSearchThreadPoolTestCase {

+   @ParametersFactory
+   public static Collection scalingThreadPools() {
+       return ThreadPool.THREAD_POOL_TYPES.entrySet()
+           .stream()
+           .filter(t -> t.getValue().equals(ThreadPool.ThreadPoolType.SCALING))
+           .map(e -> new String[] { e.getKey() })
+           .collect(Collectors.toList());
+   }
+
+   private final String threadPoolName;
+
+   public ScalingThreadPoolTests(String threadPoolName) {
+       this.threadPoolName = threadPoolName;
+   }
+
    public void testScalingThreadPoolConfiguration() throws InterruptedException {
-       final String threadPoolName = randomThreadPool(ThreadPool.ThreadPoolType.SCALING);
        final Settings.Builder builder = Settings.builder();

        final int core;
@@ -136,11 +154,11 @@ private int expectedSize(final String threadPoolName, final int numberOfProcesso
        sizes.put(ThreadPool.Names.TRANSLOG_SYNC, n -> 4 * n);
        sizes.put(ThreadPool.Names.REMOTE_PURGE, ThreadPool::halfAllocatedProcessorsMaxFive);
        sizes.put(ThreadPool.Names.REMOTE_REFRESH_RETRY, ThreadPool::halfAllocatedProcessorsMaxTen);
+       sizes.put(ThreadPool.Names.REMOTE_RECOVERY, ThreadPool::halfAllocatedProcessorsMaxTen);
        return sizes.get(threadPoolName).apply(numberOfProcessors);
    }

    public void testScalingThreadPoolIsBounded() throws InterruptedException {
-       final String threadPoolName = randomThreadPool(ThreadPool.ThreadPoolType.SCALING);
        final int size = randomIntBetween(32, 512);
        final Settings settings = Settings.builder().put("thread_pool." + threadPoolName + ".max", size).build();
        runScalingThreadPoolTest(settings, (clusterSettings, threadPool) -> {
@@ -170,7 +188,6 @@ public void testScalingThreadPoolIsBounded() throws InterruptedException {
    }

    public void testScalingThreadPoolThreadsAreTerminatedAfterKeepAlive() throws InterruptedException {
-       final String threadPoolName = randomThreadPool(ThreadPool.ThreadPoolType.SCALING);
        final int min = "generic".equals(threadPoolName) ? 4 : 1;
        final Settings settings = Settings.builder()
            .put("thread_pool." + threadPoolName + ".max", 128)

From 562e3b2503a3892df2fa849792bf8ab89ce094ca Mon Sep 17 00:00:00 2001
From: Andrew Ross
Date: Tue, 10 Oct 2023 10:22:18 -0500
Subject: [PATCH 04/33] Fix flaky SearchScrollWithFailingNodesIT (#10374)

The test intended to stop a data node and called a method named
`stopRandomNonClusterManagerNode()` in order to do that. However, that
method would stop a random node that was not the currently elected
cluster manager, regardless of node role. I have also renamed that
method to make its behavior clearer.

Resolves #10137

Signed-off-by: Andrew Ross
---
 .../cluster/MinimumClusterManagerNodesIT.java          |  4 ++--
 .../opensearch/discovery/DiscoveryDisruptionIT.java    |  2 +-
 .../search/scroll/SearchScrollWithFailingNodesIT.java  |  2 +-
 .../java/org/opensearch/test/InternalTestCluster.java  | 10 +++++-----
 4 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/MinimumClusterManagerNodesIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/MinimumClusterManagerNodesIT.java
index 4c8bf24b1655a..84648eda3d38c 100644
--- a/server/src/internalClusterTest/java/org/opensearch/cluster/MinimumClusterManagerNodesIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/cluster/MinimumClusterManagerNodesIT.java
@@ -317,8 +317,8 @@ public void testThreeNodesNoClusterManagerBlock() throws Exception {
        );
        Settings nonClusterManagerDataPathSettings1 = internalCluster().dataPathSettings(nonClusterManagerNodes.get(0));
        Settings nonClusterManagerDataPathSettings2 = internalCluster().dataPathSettings(nonClusterManagerNodes.get(1));
-       internalCluster().stopRandomNonClusterManagerNode();
-       internalCluster().stopRandomNonClusterManagerNode();
+       internalCluster().stopRandomNodeNotCurrentClusterManager();
+       internalCluster().stopRandomNodeNotCurrentClusterManager();

        logger.info("--> verify that there is no cluster-manager anymore on remaining node");
        // spin here to wait till the state is set
diff --git a/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java
index a2864b6dfd1da..70124c8c46700 100644
--- a/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java
@@ -136,7 +136,7 @@ public void testClusterJoinDespiteOfPublishingIssues() throws Exception {

        // shutting down the nodes, to avoid the leakage check tripping
        // on the states associated with the commit requests we may have dropped
-       internalCluster().stopRandomNonClusterManagerNode();
+       internalCluster().stopRandomNodeNotCurrentClusterManager();
    }

    public void testClusterFormingWithASlowNode() {
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollWithFailingNodesIT.java b/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollWithFailingNodesIT.java
index f16b9a4d67b49..27002b844da1d 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollWithFailingNodesIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollWithFailingNodesIT.java
@@ -119,7 +119,7 @@ public void testScanScrollWithShardExceptions() throws Exception {
        assertThat(numHits, equalTo(100L));
        clearScroll("_all");

-       internalCluster().stopRandomNonClusterManagerNode();
+       internalCluster().stopRandomDataNode();

        searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(10).setScroll(TimeValue.timeValueMinutes(1)).get();
        assertThat(searchResponse.getSuccessfulShards(), lessThan(searchResponse.getTotalShards()));
diff --git a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java
index d3e24ccd90500..acdc5b94804c6 100644
--- a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java
+++ b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java
@@ -1816,7 +1816,7 @@ public synchronized void stopCurrentClusterManagerNode() throws IOException {
    /**
     * Stops any of the current nodes but not the cluster-manager node.
     */
-   public synchronized void stopRandomNonClusterManagerNode() throws IOException {
+   public synchronized void stopRandomNodeNotCurrentClusterManager() throws IOException {
        NodeAndClient nodeAndClient = getRandomNodeAndClient(new NodeNamePredicate(getClusterManagerName()).negate());
        if (nodeAndClient != null) {
            logger.info(
@@ -1841,11 +1841,11 @@ public synchronized void stopCurrentMasterNode() throws IOException {
    /**
     * Stops any of the current nodes but not the cluster-manager node.
     *
-    * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #stopRandomNonClusterManagerNode()}
+    * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #stopRandomNodeNotCurrentClusterManager()}
     */
    @Deprecated
-   public synchronized void stopRandomNonMasterNode() throws IOException {
-       stopRandomNonClusterManagerNode();
+   public synchronized void stopRandomNodeNotCurrentMaster() throws IOException {
+       stopRandomNodeNotCurrentClusterManager();
    }

    /**
@@ -1860,7 +1860,7 @@ public void stopAllNodes() {
        }
        int totalClusterManagerNodes = numClusterManagerNodes();
        while (totalClusterManagerNodes > 1) {
-           stopRandomNonClusterManagerNode();
+           stopRandomNodeNotCurrentClusterManager();
            totalClusterManagerNodes -= 1;
        }
        stopCurrentClusterManagerNode();

From a43a16639530c50dc37fd486e67e2af999e82f65 Mon Sep 17 00:00:00 2001
From: gaobinlong
Date: Wed, 11 Oct 2023 05:05:16 +0800
Subject: [PATCH 05/33] Fix some test methods in SimulatePipelineRequestParsingTests that never run and fix a test failure (#10496)

* Fix some test methods in SimulatePipelineRequestParsingTests that never run and fix a test failure

Signed-off-by: Gao Binlong

* Modify changelog

Signed-off-by: Gao Binlong

---------

Signed-off-by: Gao Binlong
---
 CHANGELOG.md                                          |  1 +
 .../ingest/SimulatePipelineRequestParsingTests.java   | 12 ++++++------
 2 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 4a039526d3664..ba71604d1a94e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -149,6 +149,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Fix circular dependency in Settings initialization ([10194](https://github.com/opensearch-project/OpenSearch/pull/10194))
 - Fix registration and initialization of multiple extensions ([10256](https://github.com/opensearch-project/OpenSearch/pull/10256))
 - Fix Segment Replication ShardLockObtainFailedException bug during index corruption ([10370](https://github.com/opensearch-project/OpenSearch/pull/10370))
+- Fix some test methods in SimulatePipelineRequestParsingTests that never run and fix a test failure ([#10496](https://github.com/opensearch-project/OpenSearch/pull/10496))

 ### Security

diff --git a/server/src/test/java/org/opensearch/action/ingest/SimulatePipelineRequestParsingTests.java b/server/src/test/java/org/opensearch/action/ingest/SimulatePipelineRequestParsingTests.java
index 04a9d08bb22bc..41f782e308785 100644
--- a/server/src/test/java/org/opensearch/action/ingest/SimulatePipelineRequestParsingTests.java
+++ b/server/src/test/java/org/opensearch/action/ingest/SimulatePipelineRequestParsingTests.java
@@ -85,7 +85,7 @@ public void init() throws IOException {
        when(ingestService.getProcessorFactories()).thenReturn(registry);
    }

-   public void testParseUsingPipelineStore(boolean useExplicitType) throws Exception {
+   public void testParseUsingPipelineStore() throws Exception {
        int numDocs = randomIntBetween(1, 10);

        Map requestContent = new HashMap<>();
@@ -131,7 +131,7 @@ public void testParseUsingPipelineStore(boolean useExplicitType) throws Exceptio
        assertThat(actualRequest.getPipeline().getProcessors().size(), equalTo(1));
    }

-   public void innerTestParseWithProvidedPipeline() throws Exception {
+   public void testParseWithProvidedPipeline() throws Exception {
        int numDocs = randomIntBetween(1, 10);

        Map requestContent = new HashMap<>();
@@ -149,9 +149,9 @@ public void innerTestParseWithProvidedPipeline() throws Exception {
                    doc.put(field.getFieldName(), value);
                    expectedDoc.put(field.getFieldName(), value);
                } else {
-                   Integer value = randomIntBetween(1, 1000000);
+                   int value = randomIntBetween(1, 1000000);
                    doc.put(field.getFieldName(), value);
-                   expectedDoc.put(field.getFieldName(), value);
+                   expectedDoc.put(field.getFieldName(), (long) value);
                }
            } else if (field == VERSION_TYPE) {
                String value = VersionType.toString(randomFrom(VersionType.INTERNAL, VersionType.EXTERNAL, VersionType.EXTERNAL_GTE));
@@ -163,9 +163,9 @@
                    doc.put(field.getFieldName(), value);
                    expectedDoc.put(field.getFieldName(), value);
                } else {
-                   Integer value = randomIntBetween(1, 1000000);
+                   int value = randomIntBetween(1, 1000000);
                    doc.put(field.getFieldName(), value);
-                   expectedDoc.put(field.getFieldName(), value);
+                   expectedDoc.put(field.getFieldName(), (long) value);
                }
            } else {
                if (randomBoolean()) {

From ffebb0ac5ac30360e47ab5bdde1d5ba39be3e520 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 10 Oct 2023 17:20:37 -0400
Subject: [PATCH 06/33] Bump commons-io:commons-io from 2.13.0 to 2.14.0 in /plugins/repository-hdfs (#10505)

* Bump commons-io:commons-io in /plugins/repository-hdfs

Bumps commons-io:commons-io from 2.13.0 to 2.14.0.

---
updated-dependencies:
- dependency-name: commons-io:commons-io
  dependency-type: direct:production
  update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]

* Updating SHAs

Signed-off-by: dependabot[bot]

---------

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot[bot]
---
 plugins/repository-hdfs/build.gradle                        | 2 +-
 plugins/repository-hdfs/licenses/commons-io-2.13.0.jar.sha1 | 1 -
 plugins/repository-hdfs/licenses/commons-io-2.14.0.jar.sha1 | 1 +
 3 files changed, 2 insertions(+), 2 deletions(-)
 delete mode 100644 plugins/repository-hdfs/licenses/commons-io-2.13.0.jar.sha1
 create mode 100644 plugins/repository-hdfs/licenses/commons-io-2.14.0.jar.sha1

diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle
index 8d9ff01f5db65..1ce29afe0e69a 100644
--- a/plugins/repository-hdfs/build.gradle
+++ b/plugins/repository-hdfs/build.gradle
@@ -75,7 +75,7 @@ dependencies {
  api 'commons-collections:commons-collections:3.2.2'
  api "org.apache.commons:commons-compress:${versions.commonscompress}"
  api 'org.apache.commons:commons-configuration2:2.9.0'
- api 'commons-io:commons-io:2.13.0'
+ api 'commons-io:commons-io:2.14.0'
  api 'org.apache.commons:commons-lang3:3.13.0'
  implementation 'com.google.re2j:re2j:1.7'
  api 'javax.servlet:servlet-api:2.5'
diff --git a/plugins/repository-hdfs/licenses/commons-io-2.13.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-io-2.13.0.jar.sha1
deleted file mode 100644
index c165136eb5822..0000000000000
--- a/plugins/repository-hdfs/licenses/commons-io-2.13.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-8bb2bc9b4df17e2411533a0708a69f983bf5e83b
\ No newline at end of file
diff --git a/plugins/repository-hdfs/licenses/commons-io-2.14.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-io-2.14.0.jar.sha1
new file mode 100644
index 0000000000000..33c5cfe53e01d
--- /dev/null
+++ b/plugins/repository-hdfs/licenses/commons-io-2.14.0.jar.sha1
@@ -0,0 +1 @@
+a4c6e1f6c196339473cd2e1b037f0eb97c62755b
\ No newline at end of file

From 332fae1ee113c16ca478d7a6ab47c897ae3139e7 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 10 Oct 2023 17:23:44 -0400
Subject: [PATCH 07/33] Bump org.apache.zookeeper:zookeeper from 3.9.0 to 3.9.1 in /test/fixtures/hdfs-fixture (#10506)

* Bump org.apache.zookeeper:zookeeper in /test/fixtures/hdfs-fixture

Bumps org.apache.zookeeper:zookeeper from 3.9.0 to 3.9.1.

---
updated-dependencies:
- dependency-name: org.apache.zookeeper:zookeeper
  dependency-type: direct:production
  update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]

* Update changelog

Signed-off-by: dependabot[bot]

---------

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot[bot]
---
 CHANGELOG.md                            | 3 ++-
 test/fixtures/hdfs-fixture/build.gradle | 2 +-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index ba71604d1a94e..05a3fa8cb785b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -119,6 +119,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Bump `peter-evans/create-pull-request` from 3 to 5 ([#10301](https://github.com/opensearch-project/OpenSearch/pull/10301))
 - Bump `org.apache.avro:avro` from 1.11.2 to 1.11.3 ([#10210](https://github.com/opensearch-project/OpenSearch/pull/10210))
 - Bump `org.bouncycastle:bc-fips` from 1.0.2.3 to 1.0.2.4 ([#10297](https://github.com/opensearch-project/OpenSearch/pull/10297))
+- Bump `org.apache.zookeeper:zookeeper` from 3.9.0 to 3.9.1 ([#10506](https://github.com/opensearch-project/OpenSearch/pull/10506))

 ### Changed
 - Add instrumentation in rest and network layer. ([#9415](https://github.com/opensearch-project/OpenSearch/pull/9415))
@@ -154,4 +155,4 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 ### Security

 [Unreleased 3.0]: https://github.com/opensearch-project/OpenSearch/compare/2.x...HEAD
-[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.11...2.x
+[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.11...2.x
\ No newline at end of file
diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle
index 7149efe27d694..7de98c0986b87 100644
--- a/test/fixtures/hdfs-fixture/build.gradle
+++ b/test/fixtures/hdfs-fixture/build.gradle
@@ -64,7 +64,7 @@ dependencies {
  api "org.jetbrains.kotlin:kotlin-stdlib:${versions.kotlin}"
  api "org.eclipse.jetty:jetty-server:${versions.jetty}"
  api "org.eclipse.jetty.websocket:javax-websocket-server-impl:${versions.jetty}"
- api 'org.apache.zookeeper:zookeeper:3.9.0'
+ api 'org.apache.zookeeper:zookeeper:3.9.1'
  api "org.apache.commons:commons-text:1.10.0"
  api "commons-net:commons-net:3.9.0"
  runtimeOnly "com.google.guava:guava:${versions.guava}"

From 3c76cec7ec6c8c78e2a6884e976e7547f60757db Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 10 Oct 2023 17:24:36 -0400
Subject: [PATCH 08/33] Bump com.google.api:gax from 2.32.0 to 2.35.0 in /plugins/repository-gcs (#10507)

* Bump com.google.api:gax from 2.32.0 to 2.35.0 in /plugins/repository-gcs

Bumps com.google.api:gax from 2.32.0 to 2.35.0.

---
updated-dependencies:
- dependency-name: com.google.api:gax
  dependency-type: direct:production
  update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]

* Updating SHAs

Signed-off-by: dependabot[bot]

---------

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot[bot]
---
 plugins/repository-gcs/build.gradle                 | 2 +-
 plugins/repository-gcs/licenses/gax-2.32.0.jar.sha1 | 1 -
 plugins/repository-gcs/licenses/gax-2.35.0.jar.sha1 | 1 +
 3 files changed, 2 insertions(+), 2 deletions(-)
 delete mode 100644 plugins/repository-gcs/licenses/gax-2.32.0.jar.sha1
 create mode 100644 plugins/repository-gcs/licenses/gax-2.35.0.jar.sha1

diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle
index 52e5e71618d69..da4978608a12f 100644
--- a/plugins/repository-gcs/build.gradle
+++ b/plugins/repository-gcs/build.gradle
@@ -53,7 +53,7 @@ versions << [
 dependencies {
  api 'com.google.api:api-common:1.8.1'
- api 'com.google.api:gax:2.32.0'
+ api 'com.google.api:gax:2.35.0'
  api 'com.google.api:gax-httpjson:0.103.1'

  api 'com.google.apis:google-api-services-storage:v1-rev20230617-2.0.0'
diff --git a/plugins/repository-gcs/licenses/gax-2.32.0.jar.sha1 b/plugins/repository-gcs/licenses/gax-2.32.0.jar.sha1
deleted file mode 100644
index 9cae74e1c3673..0000000000000
--- a/plugins/repository-gcs/licenses/gax-2.32.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-522bf3c2a738847b9719eac8ce572be0f84da40a
\ No newline at end of file
diff --git a/plugins/repository-gcs/licenses/gax-2.35.0.jar.sha1 b/plugins/repository-gcs/licenses/gax-2.35.0.jar.sha1
new file mode 100644
index 0000000000000..778922c637dc1
--- /dev/null
+++ b/plugins/repository-gcs/licenses/gax-2.35.0.jar.sha1
@@ -0,0 +1 @@
+98d52034cfa6d1b881e16f418894afcfacd89b7a
\ No newline at end of file

From ef6dafe3948bf5984063927c2b14c9597d6d9df3 Mon Sep 17 00:00:00 2001
From: Kaushal Kumar
Date: Tue, 10 Oct 2023 14:30:25 -0700
Subject: [PATCH 09/33] add taskCompletionCount in search_backpressure stats (#10028)

* add taskCompletionCount in search_backpressure

Signed-off-by: Kaushal Kumar

* address comments to use final objects

Signed-off-by: Kaushal Kumar

* do spotless check run

Signed-off-by: Kaushal Kumar

* use primitive long to store completionCount

Signed-off-by: Kaushal Kumar

* use primitive long to store completionCount in searchTaskStats

Signed-off-by: Kaushal Kumar

* add pr link against the change

Signed-off-by: Kaushal Kumar

* add taskCompletionCount in search_backpressure

Signed-off-by: Kaushal Kumar

* address comments to use final objects

Signed-off-by: Kaushal Kumar

* do spotless check run

Signed-off-by: Kaushal Kumar

* use primitive long to store completionCount

Signed-off-by: Kaushal Kumar

* rebase with upstream main

Signed-off-by: Kaushal Kumar

* rebase with upstream main

Signed-off-by: Kaushal Kumar

---------

Signed-off-by: Kaushal Kumar
---
 CHANGELOG.md                                  |  2 ++
 .../SearchBackpressureService.java            |  2 ++
 .../stats/SearchShardTaskStats.java           | 18 +++++++++++++++++-
 .../backpressure/stats/SearchTaskStats.java   | 18 +++++++++++++++++-
 .../SearchBackpressureServiceTests.java       | 14 ++++++++------
 .../stats/SearchShardTaskStatsTests.java      |  7 ++++++-
 .../stats/SearchTaskStatsTests.java           |  2 +-
 7 files changed, 53 insertions(+), 10 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 05a3fa8cb785b..467e019270a47 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -54,8 +54,10 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Change http code on create index API with bad input raising NotXContentException from 500 to 400 ([#4773](https://github.com/opensearch-project/OpenSearch/pull/4773))
 - Improve summary error message for invalid setting updates ([#4792](https://github.com/opensearch-project/OpenSearch/pull/4792))
 - Return 409 Conflict HTTP status instead of 503 on failure to concurrently execute snapshots ([#8986](https://github.com/opensearch-project/OpenSearch/pull/5855))
+- Add task completion count in search backpressure stats API ([#10028](https://github.com/opensearch-project/OpenSearch/pull/10028/))
 - Performance improvement for Datetime field caching ([#4558](https://github.com/opensearch-project/OpenSearch/issues/4558))
+
 ### Deprecated

 ### Removed
diff --git a/server/src/main/java/org/opensearch/search/backpressure/SearchBackpressureService.java b/server/src/main/java/org/opensearch/search/backpressure/SearchBackpressureService.java
index 4f6c2c327509d..29fbee416a9b8 100644
--- a/server/src/main/java/org/opensearch/search/backpressure/SearchBackpressureService.java
+++ b/server/src/main/java/org/opensearch/search/backpressure/SearchBackpressureService.java
@@ -399,6 +399,7 @@ public SearchBackpressureStats nodeStats() {
        SearchTaskStats searchTaskStats = new SearchTaskStats(
            searchBackpressureStates.get(SearchTask.class).getCancellationCount(),
            searchBackpressureStates.get(SearchTask.class).getLimitReachedCount(),
+           searchBackpressureStates.get(SearchTask.class).getCompletionCount(),
            taskTrackers.get(SearchTask.class)
                .stream()
                .collect(Collectors.toUnmodifiableMap(t -> TaskResourceUsageTrackerType.fromName(t.name()), t -> t.stats(searchTasks)))
@@ -407,6 +408,7 @@
        SearchShardTaskStats searchShardTaskStats = new SearchShardTaskStats(
            searchBackpressureStates.get(SearchShardTask.class).getCancellationCount(),
            searchBackpressureStates.get(SearchShardTask.class).getLimitReachedCount(),
+           searchBackpressureStates.get(SearchShardTask.class).getCompletionCount(),
            taskTrackers.get(SearchShardTask.class)
                .stream()
                .collect(Collectors.toUnmodifiableMap(t -> TaskResourceUsageTrackerType.fromName(t.name()), t -> t.stats(searchShardTasks)))
diff --git a/server/src/main/java/org/opensearch/search/backpressure/stats/SearchShardTaskStats.java b/server/src/main/java/org/opensearch/search/backpressure/stats/SearchShardTaskStats.java
index 678c19d83fb96..ffe97d125b27a 100644
--- a/server/src/main/java/org/opensearch/search/backpressure/stats/SearchShardTaskStats.java
+++ b/server/src/main/java/org/opensearch/search/backpressure/stats/SearchShardTaskStats.java
@@ -8,6 +8,7 @@

 package org.opensearch.search.backpressure.stats;

+import org.opensearch.Version;
 import org.opensearch.common.collect.MapBuilder;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
@@ -30,21 +31,29 @@ public class SearchShardTaskStats implements ToXContentObject, Writeable {
    private final long cancellationCount;
    private final long limitReachedCount;
+   private final long completionCount;
    private final Map resourceUsageTrackerStats;

    public SearchShardTaskStats(
        long cancellationCount,
        long limitReachedCount,
+       long completionCount,
        Map resourceUsageTrackerStats
    ) {
        this.cancellationCount = cancellationCount;
        this.limitReachedCount = limitReachedCount;
+       this.completionCount = completionCount;
        this.resourceUsageTrackerStats = resourceUsageTrackerStats;
    }

    public SearchShardTaskStats(StreamInput in) throws IOException {
        this.cancellationCount = in.readVLong();
        this.limitReachedCount = in.readVLong();
+       if (in.getVersion().onOrAfter(Version.V_3_0_0)) {
+           completionCount = in.readVLong();
+       } else {
+           completionCount = -1;
+       }

        MapBuilder builder = new MapBuilder<>();
        builder.put(TaskResourceUsageTrackerType.CPU_USAGE_TRACKER, in.readOptionalWriteable(CpuUsageTracker.Stats::new));
@@ -62,6 +71,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
            builder.field(entry.getKey().getName(), entry.getValue());
        }
        builder.endObject();
+       if (completionCount != -1) {
+           builder.field("completion_count", completionCount);
+       }

        builder.startObject("cancellation_stats")
            .field("cancellation_count", cancellationCount)
@@ -75,6 +87,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
    public void writeTo(StreamOutput out) throws IOException {
        out.writeVLong(cancellationCount);
        out.writeVLong(limitReachedCount);
+       if (out.getVersion().onOrAfter(Version.V_3_0_0)) {
+           out.writeVLong(completionCount);
+       }

        out.writeOptionalWriteable(resourceUsageTrackerStats.get(TaskResourceUsageTrackerType.CPU_USAGE_TRACKER));
        out.writeOptionalWriteable(resourceUsageTrackerStats.get(TaskResourceUsageTrackerType.HEAP_USAGE_TRACKER));
@@ -88,11 +103,12 @@ public boolean equals(Object o) {
        SearchShardTaskStats that = (SearchShardTaskStats) o;
        return cancellationCount == that.cancellationCount
            && limitReachedCount == that.limitReachedCount
+           && completionCount == that.completionCount
            && resourceUsageTrackerStats.equals(that.resourceUsageTrackerStats);
    }

    @Override
    public int hashCode() {
-       return Objects.hash(cancellationCount, limitReachedCount, resourceUsageTrackerStats);
+       return Objects.hash(cancellationCount, limitReachedCount, resourceUsageTrackerStats, completionCount);
    }
 }
diff --git a/server/src/main/java/org/opensearch/search/backpressure/stats/SearchTaskStats.java b/server/src/main/java/org/opensearch/search/backpressure/stats/SearchTaskStats.java
index 302350104bd3a..a7f9b4e3d004f 100644
--- a/server/src/main/java/org/opensearch/search/backpressure/stats/SearchTaskStats.java
+++ b/server/src/main/java/org/opensearch/search/backpressure/stats/SearchTaskStats.java
@@ -8,6 +8,7 @@

 package org.opensearch.search.backpressure.stats;

+import org.opensearch.Version;
 import org.opensearch.common.collect.MapBuilder;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
@@ -31,21 +32,29 @@ public class SearchTaskStats implements ToXContentObject, Writeable {
    private final long cancellationCount;
    private final long limitReachedCount;
+   private final long completionCount;
    private final Map resourceUsageTrackerStats;

    public SearchTaskStats(
        long cancellationCount,
        long limitReachedCount,
+       long completionCount,
        Map resourceUsageTrackerStats
    ) {
        this.cancellationCount = cancellationCount;
        this.limitReachedCount = limitReachedCount;
+       this.completionCount = completionCount;
        this.resourceUsageTrackerStats = resourceUsageTrackerStats;
    }

    public SearchTaskStats(StreamInput in) throws IOException {
        this.cancellationCount = in.readVLong();
        this.limitReachedCount = in.readVLong();
+       if (in.getVersion().onOrAfter(Version.V_3_0_0)) {
+           this.completionCount = in.readVLong();
+       } else {
+           this.completionCount = -1;
+       }

        MapBuilder builder = new MapBuilder<>();
        builder.put(TaskResourceUsageTrackerType.CPU_USAGE_TRACKER, in.readOptionalWriteable(CpuUsageTracker.Stats::new));
@@ -63,6 +72,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
            builder.field(entry.getKey().getName(), entry.getValue());
        }
        builder.endObject();
+       if (completionCount != -1) {
+           builder.field("completion_count", completionCount);
+       }

        builder.startObject("cancellation_stats")
            .field("cancellation_count", cancellationCount)
@@ -76,6 +88,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
    public void writeTo(StreamOutput out) throws IOException {
        out.writeVLong(cancellationCount);
        out.writeVLong(limitReachedCount);
+       if (out.getVersion().onOrAfter(Version.V_3_0_0)) {
+           out.writeVLong(completionCount);
+       }

        out.writeOptionalWriteable(resourceUsageTrackerStats.get(TaskResourceUsageTrackerType.CPU_USAGE_TRACKER));
        out.writeOptionalWriteable(resourceUsageTrackerStats.get(TaskResourceUsageTrackerType.HEAP_USAGE_TRACKER));
@@ -89,11 +104,12 @@ public boolean equals(Object o) {
        SearchTaskStats that = (SearchTaskStats) o;
        return cancellationCount == that.cancellationCount
            && limitReachedCount == that.limitReachedCount
+           && completionCount == that.completionCount
            && resourceUsageTrackerStats.equals(that.resourceUsageTrackerStats);
    }

    @Override
    public int hashCode() {
-       return Objects.hash(cancellationCount, limitReachedCount, resourceUsageTrackerStats);
+       return Objects.hash(cancellationCount, limitReachedCount, resourceUsageTrackerStats, completionCount);
    }
 }
diff --git a/server/src/test/java/org/opensearch/search/backpressure/SearchBackpressureServiceTests.java b/server/src/test/java/org/opensearch/search/backpressure/SearchBackpressureServiceTests.java
index f0d930c4c3acb..9778798b706f4 100644
--- a/server/src/test/java/org/opensearch/search/backpressure/SearchBackpressureServiceTests.java
+++ b/server/src/test/java/org/opensearch/search/backpressure/SearchBackpressureServiceTests.java
@@ -249,10 +249,11 @@ public void testSearchTaskInFlightCancellation() {
        verify(mockTaskManager, times(10)).cancelTaskAndDescendants(any(), anyString(), anyBoolean(), any());
        assertEquals(3, service.getSearchBackpressureState(SearchTask.class).getLimitReachedCount());

-       // Verify search backpressure stats.
+       // Verify search backpressure stats. Since we are not marking any task as completed, the completionCount will be 0
+       // for SearchTaskStats here.
        SearchBackpressureStats expectedStats = new SearchBackpressureStats(
-           new SearchTaskStats(10, 3, Map.of(TaskResourceUsageTrackerType.CPU_USAGE_TRACKER, new MockStats(10))),
-           new SearchShardTaskStats(0, 0, Collections.emptyMap()),
+           new SearchTaskStats(10, 3, 0, Map.of(TaskResourceUsageTrackerType.CPU_USAGE_TRACKER, new MockStats(10))),
+           new SearchShardTaskStats(0, 0, 0, Collections.emptyMap()),
            SearchBackpressureMode.ENFORCED
        );
        SearchBackpressureStats actualStats = service.nodeStats();
@@ -323,10 +324,11 @@ public void testSearchShardTaskInFlightCancellation() {
        verify(mockTaskManager, times(12)).cancelTaskAndDescendants(any(), anyString(), anyBoolean(), any());
        assertEquals(3, service.getSearchBackpressureState(SearchShardTask.class).getLimitReachedCount());

-       // Verify search backpressure stats.
+       // Verify search backpressure stats. We are marking 20 SearchShardTasks as completed, so this should be
+       // reflected in SearchShardTaskStats.
        SearchBackpressureStats expectedStats = new SearchBackpressureStats(
-           new SearchTaskStats(0, 0, Collections.emptyMap()),
-           new SearchShardTaskStats(12, 3, Map.of(TaskResourceUsageTrackerType.CPU_USAGE_TRACKER, new MockStats(12))),
+           new SearchTaskStats(0, 0, 0, Collections.emptyMap()),
+           new SearchShardTaskStats(12, 3, 20, Map.of(TaskResourceUsageTrackerType.CPU_USAGE_TRACKER, new MockStats(12))),
            SearchBackpressureMode.ENFORCED
        );
        SearchBackpressureStats actualStats = service.nodeStats();
diff --git a/server/src/test/java/org/opensearch/search/backpressure/stats/SearchShardTaskStatsTests.java b/server/src/test/java/org/opensearch/search/backpressure/stats/SearchShardTaskStatsTests.java
index 6478fdfff61d4..f28b82cad30d3 100644
--- a/server/src/test/java/org/opensearch/search/backpressure/stats/SearchShardTaskStatsTests.java
+++ b/server/src/test/java/org/opensearch/search/backpressure/stats/SearchShardTaskStatsTests.java
@@ -39,6 +39,11 @@ public static SearchShardTaskStats randomInstance() {
            new ElapsedTimeTracker.Stats(randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong())
        );

-       return new SearchShardTaskStats(randomNonNegativeLong(), randomNonNegativeLong(), resourceUsageTrackerStats);
+       return new SearchShardTaskStats(
+           randomNonNegativeLong(),
+           randomNonNegativeLong(),
+           randomNonNegativeLong(),
+           resourceUsageTrackerStats
+       );
    }
 }
diff --git a/server/src/test/java/org/opensearch/search/backpressure/stats/SearchTaskStatsTests.java b/server/src/test/java/org/opensearch/search/backpressure/stats/SearchTaskStatsTests.java
index eb33bc1c37b7e..cc7aa92826b41 100644
--- a/server/src/test/java/org/opensearch/search/backpressure/stats/SearchTaskStatsTests.java
+++ b/server/src/test/java/org/opensearch/search/backpressure/stats/SearchTaskStatsTests.java
@@ -40,6 +40,6 @@ public static SearchTaskStats randomInstance() {
            new ElapsedTimeTracker.Stats(randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong())
        );

-       return new SearchTaskStats(randomNonNegativeLong(), randomNonNegativeLong(), resourceUsageTrackerStats);
+       return new SearchTaskStats(randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), resourceUsageTrackerStats);
    }
 }

From d3c8909f8c7b6c786af101ca0ab3dacaa84b79b1 Mon Sep 17 00:00:00 2001
From: Andrew Ross
Date: Tue, 10 Oct 2023 16:37:06 -0500
Subject: [PATCH 10/33] Mute MoreExpressionIT.testSpecialValueVariable (#10388)

Mute the test only for the concurrent search enabled case. See #10079.

Signed-off-by: Andrew Ross
---
 .../org/opensearch/script/expression/MoreExpressionIT.java | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/MoreExpressionIT.java b/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/MoreExpressionIT.java
index 8ca28a905f216..bb2f652168d5c 100644
--- a/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/MoreExpressionIT.java
+++ b/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/MoreExpressionIT.java
@@ -504,6 +504,10 @@ public void testInvalidFieldMember() {
    }

    public void testSpecialValueVariable() throws Exception {
+       assumeFalse(
+           "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/10079",
+           internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING)
+       );
        // i.e. _value for aggregations
        createIndex("test");
        ensureGreen("test");

From 00ccfc49f3aa2bf06ddc22ff8f0bc24790b222da Mon Sep 17 00:00:00 2001
From: Andrew Ross
Date: Tue, 10 Oct 2023 19:54:15 -0500
Subject: [PATCH 11/33] Replace multipart download with parallel file download (#10519)

There are a few open issues with the multi-stream download approach:
- Recovery stats are not being reported correctly
- It is incompatible (short of reopening and re-reading the entire file)
  with the existing Lucene checksum validation logic
- There are some issues with integrating it with the pending client side
  encryption work

Given this, I attempted an experiment where I replaced the
multi-stream-within-a-single-file approach with simply parallelizing
downloads across files (this is how snapshot restore works). I actually
got better results with this approach: recovering a ~52GiB shard took
about 4.7 minutes with the multi-stream code versus 3.9 minutes with the
parallel file approach (r7g.4xlarge EC2 instance, 500MiB/s EBS volume,
S3 as remote repository). I think this is the right approach as it
leverages the more battle-tested code path and addresses the three
issues listed above.

The multi-stream approach still has promise as it will allow us to
download very large files faster (whereas with this approach they can be
the long pole on the transfer operation). However, given that 5GB
segments (made up of multiple files in practice) are the norm, we
generally aren't dealing with huge files.

Signed-off-by: Andrew Ross
---
 .../org/opensearch/index/IndexModule.java     |   7 +-
 .../org/opensearch/index/IndexService.java    |   9 +-
 .../opensearch/index/shard/IndexShard.java    |  47 ++----
 .../opensearch/index/shard/StoreRecovery.java |  11 +-
 .../index/store/RemoteDirectory.java          |   4 -
 .../store/RemoteSegmentStoreDirectory.java    |  74 +--------
 .../RemoteSegmentStoreDirectoryFactory.java   |  11 +-
 .../store/RemoteStoreFileDownloader.java      | 147 ++++++++++++++++++
 .../opensearch/indices/IndicesService.java    |   9 +-
 .../indices/recovery/RecoverySettings.java    |   4 +-
 .../RemoteStoreReplicationSource.java         |  32 +---
 .../main/java/org/opensearch/node/Node.java   |   6 +-
 .../blobstore/BlobStoreRepository.java        |   6 +-
 .../opensearch/index/IndexModuleTests.java    |   5 +-
 .../RemoteStoreRefreshListenerTests.java      |   4 +-
 ...moteSegmentStoreDirectoryFactoryTests.java |   7 +-
 .../RemoteSegmentStoreDirectoryTests.java     | 127 +--------------
 .../store/RemoteStoreFileDownloaderTests.java | 119 ++++++++++++++
 .../snapshots/SnapshotResiliencyTests.java    |   5 +-
 .../index/shard/IndexShardTestCase.java       |  11 +-
 20 files changed, 333 insertions(+), 312 deletions(-)
 create mode 100644 server/src/main/java/org/opensearch/index/store/RemoteStoreFileDownloader.java
 create mode 100644 server/src/test/java/org/opensearch/index/store/RemoteStoreFileDownloaderTests.java

diff --git a/server/src/main/java/org/opensearch/index/IndexModule.java b/server/src/main/java/org/opensearch/index/IndexModule.java
index 8692876412ea9..e29283724ebf8 100644
--- a/server/src/main/java/org/opensearch/index/IndexModule.java
+++ b/server/src/main/java/org/opensearch/index/IndexModule.java
@@ -81,6 +81,7 @@
 import org.opensearch.indices.IndicesQueryCache;
 import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache;
 import org.opensearch.indices.mapper.MapperRegistry;
+import org.opensearch.indices.recovery.RecoverySettings;
 import org.opensearch.indices.recovery.RecoveryState;
 import org.opensearch.plugins.IndexStorePlugin;
 import org.opensearch.repositories.RepositoriesService;
@@ -602,7 +603,8 @@ public IndexService newIndexService(
        IndexStorePlugin.DirectoryFactory remoteDirectoryFactory,
        BiFunction translogFactorySupplier,
        Supplier clusterDefaultRefreshIntervalSupplier,
-       Supplier clusterRemoteTranslogBufferIntervalSupplier
+       Supplier clusterRemoteTranslogBufferIntervalSupplier,
+       RecoverySettings recoverySettings
    ) throws IOException {
        final IndexEventListener eventListener = freeze();
        Function<IndexService, CheckedFunction<DirectoryReader, DirectoryReader, IOException>> readerWrapperFactory = indexReaderWrapper
@@ -660,7 +662,8 @@ public IndexService newIndexService(
            recoveryStateFactory,
            translogFactorySupplier,
            clusterDefaultRefreshIntervalSupplier,
-           clusterRemoteTranslogBufferIntervalSupplier
+           clusterRemoteTranslogBufferIntervalSupplier,
+           recoverySettings
        );
        success = true;
        return indexService;
diff --git a/server/src/main/java/org/opensearch/index/IndexService.java b/server/src/main/java/org/opensearch/index/IndexService.java
index af23145be9f89..a53dbe246fa44 100644
--- a/server/src/main/java/org/opensearch/index/IndexService.java
+++ b/server/src/main/java/org/opensearch/index/IndexService.java
@@ -89,13 +89,13 @@
 import org.opensearch.index.shard.ShardNotInPrimaryModeException;
 import org.opensearch.index.shard.ShardPath;
 import org.opensearch.index.similarity.SimilarityService;
-import org.opensearch.index.store.RemoteSegmentStoreDirectoryFactory;
 import org.opensearch.index.store.Store;
 import org.opensearch.index.translog.Translog;
 import org.opensearch.index.translog.TranslogFactory;
 import org.opensearch.indices.cluster.IndicesClusterStateService;
 import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache;
 import org.opensearch.indices.mapper.MapperRegistry;
+import org.opensearch.indices.recovery.RecoverySettings;
 import org.opensearch.indices.recovery.RecoveryState;
 import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher;
 import org.opensearch.plugins.IndexStorePlugin;
@@ -179,6 +179,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
    private final BiFunction translogFactorySupplier;
    private final Supplier clusterDefaultRefreshIntervalSupplier;
    private final Supplier clusterRemoteTranslogBufferIntervalSupplier;
+   private final RecoverySettings recoverySettings;

    public IndexService(
        IndexSettings indexSettings,
@@ -213,7 +214,8 @@ public IndexService(
        IndexStorePlugin.RecoveryStateFactory recoveryStateFactory,
        BiFunction translogFactorySupplier,
        Supplier clusterDefaultRefreshIntervalSupplier,
-       Supplier clusterRemoteTranslogBufferIntervalSupplier
+       Supplier clusterRemoteTranslogBufferIntervalSupplier,
+       RecoverySettings recoverySettings
    ) {
        super(indexSettings);
        this.allowExpensiveQueries = allowExpensiveQueries;
@@ -290,6 +292,7 @@ public IndexService(
        this.retentionLeaseSyncTask = new AsyncRetentionLeaseSyncTask(this);
        this.translogFactorySupplier = translogFactorySupplier;
        this.clusterRemoteTranslogBufferIntervalSupplier = clusterRemoteTranslogBufferIntervalSupplier;
+       this.recoverySettings = recoverySettings;
        updateFsyncTaskIfNecessary();
    }

@@ -522,7 +525,7 @@ public synchronized IndexShard createShard(
            remoteStoreStatsTrackerFactory,
            clusterRemoteTranslogBufferIntervalSupplier,
            nodeEnv.nodeId(),
-           (RemoteSegmentStoreDirectoryFactory) remoteDirectoryFactory
+           recoverySettings
        );
        eventListener.indexShardStateChanged(indexShard, null, indexShard.state(), "shard created");
        eventListener.afterIndexShardCreated(indexShard);
diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java
index 251f9a5ae01c0..2643af501bbc9 100644
--- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java
+++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java
@@ -62,7 +62,6 @@
 import org.opensearch.action.admin.indices.flush.FlushRequest;
 import org.opensearch.action.admin.indices.forcemerge.ForceMergeRequest;
 import org.opensearch.action.admin.indices.upgrade.post.UpgradeRequest;
-import org.opensearch.action.support.PlainActionFuture;
 import org.opensearch.action.support.replication.PendingReplicationActions;
 import org.opensearch.action.support.replication.ReplicationResponse;
 import org.opensearch.cluster.metadata.DataStream;
@@ -160,9 +159,8 @@
 import org.opensearch.index.seqno.SequenceNumbers;
 import org.opensearch.index.shard.PrimaryReplicaSyncer.ResyncTask;
 import org.opensearch.index.similarity.SimilarityService;
-import org.opensearch.index.store.DirectoryFileTransferTracker;
 import org.opensearch.index.store.RemoteSegmentStoreDirectory;
-import org.opensearch.index.store.RemoteSegmentStoreDirectoryFactory;
+import org.opensearch.index.store.RemoteStoreFileDownloader;
 import org.opensearch.index.store.Store;
 import org.opensearch.index.store.Store.MetadataSnapshot;
 import org.opensearch.index.store.StoreFileMetadata;
@@ -184,6 +182,7 @@
 import org.opensearch.indices.recovery.PeerRecoveryTargetService;
 import org.opensearch.indices.recovery.RecoveryFailedException;
 import org.opensearch.indices.recovery.RecoveryListener;
+import org.opensearch.indices.recovery.RecoverySettings;
 import org.opensearch.indices.recovery.RecoveryState;
 import org.opensearch.indices.recovery.RecoveryTarget;
 import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint;
@@ -201,7 +200,6 @@
 import java.nio.channels.FileChannel;
 import java.nio.charset.StandardCharsets;
 import java.nio.file.NoSuchFileException;
-import java.nio.file.Path;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
@@ -342,7 +340,7 @@ Runnable getGlobalCheckpointSyncer() {
    private final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory;

    private final List internalRefreshListener = new ArrayList<>();
-   private final RemoteSegmentStoreDirectoryFactory remoteSegmentStoreDirectoryFactory;
+   private final RemoteStoreFileDownloader fileDownloader;

    public IndexShard(
        final ShardRouting shardRouting,
@@ -371,10 +369,7 @@ public IndexShard(
        final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory,
        final Supplier clusterRemoteTranslogBufferIntervalSupplier,
        final String nodeId,
-       // Wiring a directory factory here breaks some intended abstractions, but this remote directory
-       // factory is used not as a Lucene directory but instead to copy files from a remote store when
-       // restoring a shallow snapshot.
-       @Nullable final RemoteSegmentStoreDirectoryFactory remoteSegmentStoreDirectoryFactory
+       final RecoverySettings recoverySettings
    ) throws IOException {
        super(shardRouting.shardId(), indexSettings);
        assert shardRouting.initializing();
@@ -470,7 +465,7 @@ public boolean shouldCache(Query query) {
            ? false
            : mapperService.documentMapper().mappers().containsTimeStampField();
        this.remoteStoreStatsTrackerFactory = remoteStoreStatsTrackerFactory;
-       this.remoteSegmentStoreDirectoryFactory = remoteSegmentStoreDirectoryFactory;
+       this.fileDownloader = new RemoteStoreFileDownloader(shardRouting.shardId(), threadPool, recoverySettings);
    }

    public ThreadPool getThreadPool() {
@@ -568,6 +563,10 @@ public String getNodeId() {
        return translogConfig.getNodeId();
    }

+   public RemoteStoreFileDownloader getFileDownloader() {
+       return fileDownloader;
+   }
+
    @Override
    public void updateShardState(
        final ShardRouting newRouting,
@@ -2706,7 +2705,7 @@ public void restoreFromRemoteStore(ActionListener listener) {

    public void restoreFromSnapshotAndRemoteStore(
        Repository repository,
-       RemoteSegmentStoreDirectoryFactory remoteSegmentStoreDirectoryFactory,
+       RepositoriesService repositoriesService,
        ActionListener listener
    ) {
        try {
@@ -2714,7 +2713,7 @@ public void restoreFromSnapshotAndRemoteStore(
            assert recoveryState.getRecoverySource().getType() == RecoverySource.Type.SNAPSHOT : "invalid recovery type: "
                + recoveryState.getRecoverySource();
            StoreRecovery storeRecovery = new StoreRecovery(shardId, logger);
-           storeRecovery.recoverFromSnapshotAndRemoteStore(this, repository, remoteSegmentStoreDirectoryFactory, listener);
+           storeRecovery.recoverFromSnapshotAndRemoteStore(this, repository, repositoriesService, listener, threadPool);
        } catch (Exception e) {
            listener.onFailure(e);
        }
@@ -3554,7 +3553,7 @@ public void startRecovery(
                    "from snapshot and remote store",
                    recoveryState,
                    recoveryListener,
-                   l -> restoreFromSnapshotAndRemoteStore(repositoriesService.repository(repo), remoteSegmentStoreDirectoryFactory, l)
+                   l -> restoreFromSnapshotAndRemoteStore(repositoriesService.repository(repo), repositoriesService, l)
                );
                // indicesService.indexService(shardRouting.shardId().getIndex()).addMetadataListener();
            } else {
@@ -4912,7 +4911,7 @@ private String copySegmentFiles(

        if (toDownloadSegments.isEmpty() == false) {
            try {
-               downloadSegments(storeDirectory, sourceRemoteDirectory, targetRemoteDirectory, toDownloadSegments, onFileSync);
+               fileDownloader.download(sourceRemoteDirectory, storeDirectory, targetRemoteDirectory, toDownloadSegments, onFileSync);
            } catch (Exception e) {
                throw new IOException("Error occurred when downloading segments from remote store", e);
            }
@@ -4925,26 +4924,6 @@ private String copySegmentFiles(
        return segmentNFile;
    }

-   private void downloadSegments(
-       Directory storeDirectory,
-       RemoteSegmentStoreDirectory sourceRemoteDirectory,
-       RemoteSegmentStoreDirectory targetRemoteDirectory,
-       Set toDownloadSegments,
-       final Runnable onFileSync
-   ) throws IOException {
-       final Path indexPath = store.shardPath() == null ?
null : store.shardPath().resolveIndex(); - final DirectoryFileTransferTracker fileTransferTracker = store.getDirectoryFileTransferTracker(); - for (String segment : toDownloadSegments) { - final PlainActionFuture segmentListener = PlainActionFuture.newFuture(); - sourceRemoteDirectory.copyTo(segment, storeDirectory, indexPath, fileTransferTracker, segmentListener); - segmentListener.actionGet(); - onFileSync.run(); - if (targetRemoteDirectory != null) { - targetRemoteDirectory.copyFrom(storeDirectory, segment, segment, IOContext.DEFAULT); - } - } - } - private boolean localDirectoryContains(Directory localDirectory, String file, long checksum) { try (IndexInput indexInput = localDirectory.openInput(file, IOContext.DEFAULT)) { if (checksum == CodecUtil.retrieveChecksum(indexInput)) { diff --git a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java index 762aab51469d0..c0211e1257c8e 100644 --- a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java @@ -70,7 +70,9 @@ import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.replication.common.ReplicationLuceneIndex; import org.opensearch.repositories.IndexId; +import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; +import org.opensearch.threadpool.ThreadPool; import java.io.IOException; import java.nio.channels.FileChannel; @@ -360,8 +362,9 @@ void recoverFromRepository(final IndexShard indexShard, Repository repository, A void recoverFromSnapshotAndRemoteStore( final IndexShard indexShard, Repository repository, - RemoteSegmentStoreDirectoryFactory directoryFactory, - ActionListener listener + RepositoriesService repositoriesService, + ActionListener listener, + ThreadPool threadPool ) { try { if (canRecover(indexShard)) { @@ -389,6 +392,10 @@ void recoverFromSnapshotAndRemoteStore( remoteStoreRepository = shallowCopyShardMetadata.getRemoteStoreRepository(); } + RemoteSegmentStoreDirectoryFactory directoryFactory = new RemoteSegmentStoreDirectoryFactory( + () -> repositoriesService, + threadPool + ); RemoteSegmentStoreDirectory sourceRemoteDirectory = (RemoteSegmentStoreDirectory) directoryFactory.newDirectory( remoteStoreRepository, indexUUID, diff --git a/server/src/main/java/org/opensearch/index/store/RemoteDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteDirectory.java index 70a88f6159764..345583bbbd1be 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteDirectory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteDirectory.java @@ -336,10 +336,6 @@ public boolean copyFrom( return false; } - protected UnaryOperator getDownloadRateLimiter() { - return downloadRateLimiter; - } - private void uploadBlob( Directory from, String src, diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java index c0b302c68c0ea..be1f2341236ab 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java @@ -24,8 +24,6 @@ import org.apache.lucene.store.IndexOutput; import org.apache.lucene.util.Version; import org.opensearch.common.UUIDs; -import org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer; -import 
org.opensearch.common.blobstore.stream.read.listener.ReadContextListener; import org.opensearch.common.collect.Tuple; import org.opensearch.common.io.VersionedCodecStreamWrapper; import org.opensearch.common.logging.Loggers; @@ -38,7 +36,6 @@ import org.opensearch.index.store.lockmanager.RemoteStoreLockManager; import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata; import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadataHandler; -import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.threadpool.ThreadPool; @@ -46,7 +43,6 @@ import java.io.IOException; import java.io.InputStream; import java.nio.file.NoSuchFileException; -import java.nio.file.Path; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -93,8 +89,6 @@ public final class RemoteSegmentStoreDirectory extends FilterDirectory implement private final ThreadPool threadPool; - private final RecoverySettings recoverySettings; - /** * Keeps track of local segment filename to uploaded filename along with other attributes like checksum. * This map acts as a cache layer for uploaded segment filenames which helps avoid calling listAll() each time. @@ -127,15 +121,13 @@ public RemoteSegmentStoreDirectory( RemoteDirectory remoteMetadataDirectory, RemoteStoreLockManager mdLockManager, ThreadPool threadPool, - ShardId shardId, - RecoverySettings recoverySettings + ShardId shardId ) throws IOException { super(remoteDataDirectory); this.remoteDataDirectory = remoteDataDirectory; this.remoteMetadataDirectory = remoteMetadataDirectory; this.mdLockManager = mdLockManager; this.threadPool = threadPool; - this.recoverySettings = recoverySettings; this.logger = Loggers.getLogger(getClass(), shardId); init(); } @@ -473,70 +465,6 @@ public void copyFrom(Directory from, String src, IOContext context, ActionListen } } - /** - * Copies an existing {@code source} file from this directory to a non-existent file (also - * named {@code source}) in either {@code destinationDirectory} or {@code destinationPath}. - * If the blob container backing this directory supports multipart downloads, the {@code source} - * file will be downloaded (potentially in multiple concurrent parts) directly to - * {@code destinationPath}. This method will return immediately and {@code fileCompletionListener} - * will be notified upon completion. - *

- * If multipart downloads are not supported, then {@code source} file will be copied to a file named - * {@code source} in a single part to {@code destinationDirectory}. The download will happen on the - * calling thread and {@code fileCompletionListener} will be notified synchronously before this - * method returns. - * - * @param source The source file name - * @param destinationDirectory The destination directory (if multipart is not supported) - * @param destinationPath The destination path (if multipart is supported) - * @param fileTransferTracker Tracker used for file transfer stats - * @param fileCompletionListener The listener to notify of completion - */ - public void copyTo( - String source, - Directory destinationDirectory, - Path destinationPath, - DirectoryFileTransferTracker fileTransferTracker, - ActionListener fileCompletionListener - ) { - final String blobName = getExistingRemoteFilename(source); - if (destinationPath != null && remoteDataDirectory.getBlobContainer() instanceof AsyncMultiStreamBlobContainer) { - long length = 0L; - try { - length = fileLength(source); - } catch (IOException ex) { - logger.error("Unable to fetch segment length for stats tracking", ex); - } - final long fileLength = length; - final long startTime = System.currentTimeMillis(); - fileTransferTracker.addTransferredBytesStarted(fileLength); - final AsyncMultiStreamBlobContainer blobContainer = (AsyncMultiStreamBlobContainer) remoteDataDirectory.getBlobContainer(); - final Path destinationFilePath = destinationPath.resolve(source); - final ActionListener completionListener = ActionListener.wrap(response -> { - fileTransferTracker.addTransferredBytesSucceeded(fileLength, startTime); - fileCompletionListener.onResponse(response); - }, e -> { - fileTransferTracker.addTransferredBytesFailed(fileLength, startTime); - fileCompletionListener.onFailure(e); - }); - final ReadContextListener readContextListener = new ReadContextListener( - blobName, - destinationFilePath, - completionListener, - threadPool, - remoteDataDirectory.getDownloadRateLimiter(), - recoverySettings.getMaxConcurrentRemoteStoreStreams() - ); - blobContainer.readBlobAsync(blobName, readContextListener); - } else { - // Fallback to older mechanism of downloading the file - ActionListener.completeWith(fileCompletionListener, () -> { - destinationDirectory.copyFrom(this, source, source, IOContext.DEFAULT); - return source; - }); - } - } - /** * This acquires a lock on a given commit by creating a lock file in lock directory using {@code FileLockInfo} * diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java index cc55380894ecd..a5e89ec6a8327 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java @@ -15,7 +15,6 @@ import org.opensearch.index.shard.ShardPath; import org.opensearch.index.store.lockmanager.RemoteStoreLockManager; import org.opensearch.index.store.lockmanager.RemoteStoreLockManagerFactory; -import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.plugins.IndexStorePlugin; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; @@ -35,18 +34,12 @@ public class RemoteSegmentStoreDirectoryFactory implements IndexStorePlugin.Dire private static final String SEGMENTS = "segments"; private 
final Supplier repositoriesService; - private final RecoverySettings recoverySettings; private final ThreadPool threadPool; - public RemoteSegmentStoreDirectoryFactory( - Supplier repositoriesService, - ThreadPool threadPool, - RecoverySettings recoverySettings - ) { + public RemoteSegmentStoreDirectoryFactory(Supplier repositoriesService, ThreadPool threadPool) { this.repositoriesService = repositoriesService; this.threadPool = threadPool; - this.recoverySettings = recoverySettings; } @Override @@ -78,7 +71,7 @@ public Directory newDirectory(String repositoryName, String indexUUID, ShardId s String.valueOf(shardId.id()) ); - return new RemoteSegmentStoreDirectory(dataDirectory, metadataDirectory, mdLockManager, threadPool, shardId, recoverySettings); + return new RemoteSegmentStoreDirectory(dataDirectory, metadataDirectory, mdLockManager, threadPool, shardId); } catch (RepositoryMissingException e) { throw new IllegalArgumentException("Repository should be created before creating index with remote_store enabled setting", e); }
diff --git a/server/src/main/java/org/opensearch/index/store/RemoteStoreFileDownloader.java b/server/src/main/java/org/opensearch/index/store/RemoteStoreFileDownloader.java
new file mode 100644
index 0000000000000..4fc721f2b96b5
--- /dev/null
+++ b/server/src/main/java/org/opensearch/index/store/RemoteStoreFileDownloader.java
@@ -0,0 +1,147 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index.store;
+
+import org.apache.logging.log4j.Logger;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.IOContext;
+import org.opensearch.action.support.GroupedActionListener;
+import org.opensearch.action.support.PlainActionFuture;
+import org.opensearch.common.Nullable;
+import org.opensearch.common.annotation.InternalApi;
+import org.opensearch.common.logging.Loggers;
+import org.opensearch.common.util.concurrent.UncategorizedExecutionException;
+import org.opensearch.core.action.ActionListener;
+import org.opensearch.core.index.shard.ShardId;
+import org.opensearch.indices.recovery.RecoverySettings;
+import org.opensearch.threadpool.ThreadPool;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Queue;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.ExecutionException;
+
+/**
+ * Helper class to download files from a {@link RemoteSegmentStoreDirectory}
+ * instance to a local {@link Directory} instance in parallel depending on thread
+ * pool size and recovery settings.
+ */
+@InternalApi
+public final class RemoteStoreFileDownloader {
+    private final Logger logger;
+    private final ThreadPool threadPool;
+    private final RecoverySettings recoverySettings;
+
+    public RemoteStoreFileDownloader(ShardId shardId, ThreadPool threadPool, RecoverySettings recoverySettings) {
+        this.logger = Loggers.getLogger(RemoteStoreFileDownloader.class, shardId);
+        this.threadPool = threadPool;
+        this.recoverySettings = recoverySettings;
+    }
+
+    /**
+     * Copies the given segments from the remote segment store to the given
+     * local directory.
+     * @param source The remote directory to copy segment files from
+     * @param destination The local directory to copy segment files to
+     * @param toDownloadSegments The list of segment files to download
+     */
+    public void download(Directory source, Directory destination, Collection<String> toDownloadSegments) throws IOException {
+        downloadInternal(source, destination, null, toDownloadSegments, () -> {});
+    }
+
+    /**
+     * Copies the given segments from the remote segment store to the given
+     * local directory, while also copying the segments _to_ another remote directory.
+     * @param source The remote directory to copy segment files from
+     * @param destination The local directory to copy segment files to
+     * @param secondDestination The second remote directory that segment files are
+     *                          copied to after being copied to the local directory
+     * @param toDownloadSegments The list of segment files to download
+     * @param onFileCompletion A generic runnable that is invoked after each file download.
+     *                         Must be thread safe as this may be invoked concurrently from
+     *                         different threads.
+     */
+    public void download(
+        Directory source,
+        Directory destination,
+        Directory secondDestination,
+        Collection<String> toDownloadSegments,
+        Runnable onFileCompletion
+    ) throws IOException {
+        downloadInternal(source, destination, secondDestination, toDownloadSegments, onFileCompletion);
+    }
+
+    private void downloadInternal(
+        Directory source,
+        Directory destination,
+        @Nullable Directory secondDestination,
+        Collection<String> toDownloadSegments,
+        Runnable onFileCompletion
+    ) throws IOException {
+        final Queue<String> queue = new ConcurrentLinkedQueue<>(toDownloadSegments);
+        // Choose the minimum of:
+        // - number of files to download
+        // - max thread pool size
+        // - "indices.recovery.max_concurrent_remote_store_streams" setting
+        final int threads = Math.min(
+            toDownloadSegments.size(),
+            Math.min(threadPool.info(ThreadPool.Names.REMOTE_RECOVERY).getMax(), recoverySettings.getMaxConcurrentRemoteStoreStreams())
+        );
+        logger.trace("Starting download of {} files with {} threads", queue.size(), threads);
+        final PlainActionFuture<Collection<Void>> listener = PlainActionFuture.newFuture();
+        final ActionListener<Void> allFilesListener = new GroupedActionListener<>(listener, threads);
+        for (int i = 0; i < threads; i++) {
+            copyOneFile(source, destination, secondDestination, queue, onFileCompletion, allFilesListener);
+        }
+        try {
+            listener.actionGet();
+        } catch (UncategorizedExecutionException e) {
+            // Any IOException will be double-wrapped so dig it out and throw it
+            if (e.getCause() instanceof ExecutionException) {
+                if (e.getCause().getCause() instanceof IOException) {
+                    throw (IOException) e.getCause().getCause();
+                }
+            }
+            throw e;
+        }
+    }
+
+    private void copyOneFile(
+        Directory source,
+        Directory destination,
+        @Nullable Directory secondDestination,
+        Queue<String> queue,
+        Runnable onFileCompletion,
+        ActionListener<Void> listener
+    ) {
+        final String file = queue.poll();
+        if (file == null) {
+            // Queue is empty, so notify listener we are done
+            listener.onResponse(null);
+        } else {
+            threadPool.executor(ThreadPool.Names.REMOTE_RECOVERY).submit(() -> {
+                logger.trace("Downloading file {}", file);
+                try {
+                    destination.copyFrom(source, file, file, IOContext.DEFAULT);
+                    onFileCompletion.run();
+                    if (secondDestination != null) {
+                        secondDestination.copyFrom(destination, file, file, IOContext.DEFAULT);
+                    }
+                } catch (Exception e) {
+                    // Clear the queue to stop any future processing, report the failure, then return
+                    queue.clear();
+                    listener.onFailure(e);
+                    return;
+                }
+                copyOneFile(source, destination, secondDestination, queue, onFileCompletion, listener);
+            });
+        }
+    }
+}
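Taken together, the two public download overloads above might be exercised roughly as follows. This is a hypothetical call site, not code from the patch: the shard id, thread pool, recovery settings, directories, and segment names are all assumed to be supplied by the surrounding recovery code (in the patch itself, IndexShard constructs the downloader and exposes it via getFileDownloader()).

    // Hypothetical wiring; only RemoteStoreFileDownloader and its download(...)
    // signatures come from this patch, everything else is assumed.
    RemoteStoreFileDownloader downloader = new RemoteStoreFileDownloader(shardId, threadPool, recoverySettings);

    // Simple form: copy the named segment files from the remote directory locally.
    downloader.download(remoteDirectory, localDirectory, List.of("_0.cfe", "_0.cfs", "_0.si"));

    // Extended form: additionally mirror each downloaded file to a second remote
    // directory and invoke a thread-safe callback after every completed file.
    downloader.download(remoteDirectory, localDirectory, secondRemoteDirectory, segmentNames, onFileSync);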
diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java
index a72142e65c5e8..e27a597007b62 100644
--- a/server/src/main/java/org/opensearch/indices/IndicesService.java
+++ b/server/src/main/java/org/opensearch/indices/IndicesService.java
@@ -144,6 +144,7 @@ import org.opensearch.indices.mapper.MapperRegistry; import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoveryListener; +import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.indices.replication.common.ReplicationType;
@@ -335,6 +336,7 @@ public class IndicesService extends AbstractLifecycleComponent private final CountDownLatch closeLatch = new CountDownLatch(1); private volatile boolean idFieldDataEnabled; private volatile boolean allowExpensiveQueries; + private final RecoverySettings recoverySettings; @Nullable private final OpenSearchThreadPoolExecutor danglingIndicesThreadPoolExecutor;
@@ -380,7 +382,8 @@ public IndicesService( Supplier repositoriesServiceSupplier, FileCacheCleaner fileCacheCleaner, SearchRequestStats searchRequestStats, - @Nullable RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory + @Nullable RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory, + RecoverySettings recoverySettings ) { this.settings = settings; this.threadPool = threadPool;
@@ -477,6 +480,7 @@ protected void closeInternal() { this.clusterRemoteTranslogBufferInterval = CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.get(clusterService.getSettings()); clusterService.getClusterSettings() .addSettingsUpdateConsumer(CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING, this::setClusterRemoteTranslogBufferInterval); + this.recoverySettings = recoverySettings; }
@@ -874,7 +878,8 @@ private synchronized IndexService createIndexService( remoteDirectoryFactory, translogFactorySupplier, this::getClusterDefaultRefreshInterval, - this::getClusterRemoteTranslogBufferInterval + this::getClusterRemoteTranslogBufferInterval, + this.recoverySettings ); }
diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java b/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java
index ed9755bf824ea..44dfb2f4cb00a 100644
--- a/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java
+++ b/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java
@@ -85,11 +85,11 @@ public class RecoverySettings {
     );

     /**
-     * Controls the maximum number of streams that can be started concurrently when downloading from the remote store.
+     * Controls the maximum number of streams that can be started concurrently per recovery when downloading from the remote store.
      */
     public static final Setting INDICES_RECOVERY_MAX_CONCURRENT_REMOTE_STORE_STREAMS_SETTING = Setting.intSetting(
         "indices.recovery.max_concurrent_remote_store_streams",
-        20,
+        10,
         1,
         Property.Dynamic,
         Property.NodeScope
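The hunk above also halves the default for this dynamic, node-scoped setting from 20 to 10 concurrent streams. As a rough standalone illustration of how such a setting behaves (it reuses the same Setting.intSetting(key, default, min, properties...) factory shown above; the new RemoteStoreFileDownloaderTests in this patch builds a RecoverySettings from a Settings object the same way):

    // Illustrative sketch only; assumes imports of
    // org.opensearch.common.settings.Setting, Setting.Property and Settings.
    Setting<Integer> maxStreams = Setting.intSetting(
        "indices.recovery.max_concurrent_remote_store_streams",
        10, // new default (previously 20)
        1,  // minimum allowed value
        Setting.Property.Dynamic,   // updatable at runtime via cluster settings
        Setting.Property.NodeScope
    );
    // With no explicit value configured, the default applies:
    assert maxStreams.get(Settings.EMPTY) == 10;
    // An explicit node setting overrides it:
    assert maxStreams.get(Settings.builder()
        .put("indices.recovery.max_concurrent_remote_store_streams", 4)
        .build()) == 4;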
diff --git a/server/src/main/java/org/opensearch/indices/replication/RemoteStoreReplicationSource.java b/server/src/main/java/org/opensearch/indices/replication/RemoteStoreReplicationSource.java
index ddbcb86269aa9..d2000a56401f5 100644
--- a/server/src/main/java/org/opensearch/indices/replication/RemoteStoreReplicationSource.java
+++ b/server/src/main/java/org/opensearch/indices/replication/RemoteStoreReplicationSource.java
@@ -14,20 +14,16 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.store.FilterDirectory; import org.apache.lucene.util.Version; -import org.opensearch.action.support.PlainActionFuture; import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.core.action.ActionListener; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardState; -import org.opensearch.index.shard.ShardPath; -import org.opensearch.index.store.DirectoryFileTransferTracker; import org.opensearch.index.store.RemoteSegmentStoreDirectory; import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; -import java.nio.file.Path; import java.util.ArrayList; import java.util.Collection; import java.util.Collections;
@@ -109,49 +105,31 @@ public void getSegmentFiles( logger.debug("Downloading segment files from remote store {}", filesToFetch); RemoteSegmentMetadata remoteSegmentMetadata = remoteDirectory.readLatestMetadataFile(); - List toDownloadSegments = new ArrayList<>(); Collection directoryFiles = List.of(indexShard.store().directory().listAll()); if (remoteSegmentMetadata != null) { try { indexShard.store().incRef(); indexShard.remoteStore().incRef(); final Directory storeDirectory = indexShard.store().directory(); - final ShardPath shardPath = indexShard.shardPath(); + final List toDownloadSegmentNames = new ArrayList<>(); for (StoreFileMetadata fileMetadata : filesToFetch) { String file = fileMetadata.name(); assert directoryFiles.contains(file) == false : "Local store already contains the file " + file; - toDownloadSegments.add(fileMetadata); + toDownloadSegmentNames.add(file); } - final DirectoryFileTransferTracker fileTransferTracker = indexShard.store().getDirectoryFileTransferTracker(); - downloadSegments(storeDirectory, remoteDirectory, toDownloadSegments, shardPath, fileTransferTracker, listener); - logger.debug("Downloaded segment files from remote store {}", toDownloadSegments); + indexShard.getFileDownloader().download(remoteDirectory, storeDirectory, toDownloadSegmentNames); + logger.debug("Downloaded segment files from remote store {}", filesToFetch); } finally { indexShard.store().decRef(); indexShard.remoteStore().decRef(); } } + listener.onResponse(new GetSegmentFilesResponse(filesToFetch)); } catch (Exception e) { listener.onFailure(e); } } - private void downloadSegments( - Directory storeDirectory, - RemoteSegmentStoreDirectory remoteStoreDirectory, - List toDownloadSegments, - ShardPath shardPath, - DirectoryFileTransferTracker fileTransferTracker, - ActionListener completionListener - ) { - final Path indexPath = shardPath == null ?
null : shardPath.resolveIndex(); - for (StoreFileMetadata storeFileMetadata : toDownloadSegments) { - final PlainActionFuture segmentListener = PlainActionFuture.newFuture(); - remoteStoreDirectory.copyTo(storeFileMetadata.name(), storeDirectory, indexPath, fileTransferTracker, segmentListener); - segmentListener.actionGet(); - } - completionListener.onResponse(new GetSegmentFilesResponse(toDownloadSegments)); - } - @Override public String getDescription() { return "RemoteStoreReplicationSource"; diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index f44c8c8bea7f1..5b3b064a47c66 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -771,8 +771,7 @@ protected Node( final IndexStorePlugin.DirectoryFactory remoteDirectoryFactory = new RemoteSegmentStoreDirectoryFactory( repositoriesServiceReference::get, - threadPool, - recoverySettings + threadPool ); final SearchRequestStats searchRequestStats = new SearchRequestStats(); @@ -803,7 +802,8 @@ protected Node( repositoriesServiceReference::get, fileCacheCleaner, searchRequestStats, - remoteStoreStatsTrackerFactory + remoteStoreStatsTrackerFactory, + recoverySettings ); final AliasValidator aliasValidator = new AliasValidator(); diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java index bf85e19493203..8e6649c366eeb 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java @@ -1144,8 +1144,7 @@ private void executeStaleShardDelete( // see https://github.com/opensearch-project/OpenSearch/issues/8469 new RemoteSegmentStoreDirectoryFactory( remoteStoreLockManagerFactory.getRepositoriesService(), - threadPool, - recoverySettings + threadPool ).newDirectory( remoteStoreRepoForIndex, indexUUID, @@ -1615,8 +1614,7 @@ private void executeOneStaleIndexDelete( // see https://github.com/opensearch-project/OpenSearch/issues/8469 new RemoteSegmentStoreDirectoryFactory( remoteStoreLockManagerFactory.getRepositoriesService(), - threadPool, - recoverySettings + threadPool ).newDirectory( remoteStoreRepoForIndex, indexUUID, diff --git a/server/src/test/java/org/opensearch/index/IndexModuleTests.java b/server/src/test/java/org/opensearch/index/IndexModuleTests.java index bbd73bcf97aab..97bc822be7d51 100644 --- a/server/src/test/java/org/opensearch/index/IndexModuleTests.java +++ b/server/src/test/java/org/opensearch/index/IndexModuleTests.java @@ -258,10 +258,11 @@ private IndexService newIndexService(IndexModule module) throws IOException { writableRegistry(), () -> false, null, - new RemoteSegmentStoreDirectoryFactory(() -> repositoriesService, threadPool, DefaultRecoverySettings.INSTANCE), + new RemoteSegmentStoreDirectoryFactory(() -> repositoriesService, threadPool), translogFactorySupplier, () -> IndexSettings.DEFAULT_REFRESH_INTERVAL, - () -> IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL + () -> IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, + DefaultRecoverySettings.INSTANCE ); } diff --git a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java index 941f2f48e71af..5a13f57db2c87 100644 --- 
a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java +++ b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java @@ -33,7 +33,6 @@ import org.opensearch.index.store.RemoteSegmentStoreDirectory.MetadataFilenameUtils; import org.opensearch.index.store.Store; import org.opensearch.index.store.lockmanager.RemoteStoreLockManager; -import org.opensearch.indices.recovery.DefaultRecoverySettings; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.threadpool.ThreadPool; @@ -156,8 +155,7 @@ public void testRemoteDirectoryInitThrowsException() throws IOException { remoteMetadataDirectory, mock(RemoteStoreLockManager.class), mock(ThreadPool.class), - shardId, - DefaultRecoverySettings.INSTANCE + shardId ); FilterDirectory remoteStoreFilterDirectory = new RemoteStoreRefreshListenerTests.TestFilterDirectory( new RemoteStoreRefreshListenerTests.TestFilterDirectory(remoteSegmentStoreDirectory) diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactoryTests.java index 78c7fe64cebd9..cad5e47531cc6 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactoryTests.java @@ -20,7 +20,6 @@ import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexSettings; import org.opensearch.index.shard.ShardPath; -import org.opensearch.indices.recovery.DefaultRecoverySettings; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.RepositoryMissingException; import org.opensearch.repositories.blobstore.BlobStoreRepository; @@ -58,11 +57,7 @@ public void setup() { repositoriesService = mock(RepositoriesService.class); threadPool = mock(ThreadPool.class); when(repositoriesServiceSupplier.get()).thenReturn(repositoriesService); - remoteSegmentStoreDirectoryFactory = new RemoteSegmentStoreDirectoryFactory( - repositoriesServiceSupplier, - threadPool, - DefaultRecoverySettings.INSTANCE - ); + remoteSegmentStoreDirectoryFactory = new RemoteSegmentStoreDirectoryFactory(repositoriesServiceSupplier, threadPool); } public void testNewDirectory() throws IOException { diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java index 3e45699ef36eb..36cfd84ff960a 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java @@ -25,9 +25,7 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.UUIDs; import org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer; -import org.opensearch.common.blobstore.stream.read.ReadContext; import org.opensearch.common.blobstore.stream.write.WriteContext; -import org.opensearch.common.io.InputStreamContainer; import org.opensearch.common.io.VersionedCodecStreamWrapper; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.lucene.store.ByteArrayIndexInput; @@ -42,7 +40,6 @@ import org.opensearch.index.store.lockmanager.RemoteStoreMetadataLockManager; import 
org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata; import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadataHandler; -import org.opensearch.indices.recovery.DefaultRecoverySettings; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.threadpool.ThreadPool; import org.junit.After; @@ -51,18 +48,15 @@ import java.io.ByteArrayInputStream; import java.io.IOException; import java.nio.file.NoSuchFileException; -import java.nio.file.Path; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; -import java.util.concurrent.CompletableFuture; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; -import java.util.function.UnaryOperator; import org.mockito.Mockito; @@ -72,8 +66,6 @@ import static org.opensearch.test.RemoteStoreTestUtils.getDummyMetadata; import static org.hamcrest.CoreMatchers.is; import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.ArgumentMatchers.contains; import static org.mockito.Mockito.any; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.doThrow; @@ -151,14 +143,12 @@ public void setup() throws IOException { remoteMetadataDirectory, mdLockManager, threadPool, - indexShard.shardId(), - DefaultRecoverySettings.INSTANCE + indexShard.shardId() ); try (Store store = indexShard.store()) { segmentInfos = store.readLastCommittedSegmentsInfo(); } - when(remoteDataDirectory.getDownloadRateLimiter()).thenReturn(UnaryOperator.identity()); when(threadPool.executor(ThreadPool.Names.REMOTE_PURGE)).thenReturn(executorService); when(threadPool.executor(ThreadPool.Names.REMOTE_RECOVERY)).thenReturn(executorService); } @@ -568,118 +558,6 @@ public void onFailure(Exception e) {} storeDirectory.close(); } - public void testCopyFilesToMultipart() throws Exception { - String filename = "_0.cfe"; - populateMetadata(); - remoteSegmentStoreDirectory.init(); - - Directory storeDirectory = mock(Directory.class); - AsyncMultiStreamBlobContainer blobContainer = mock(AsyncMultiStreamBlobContainer.class); - when(remoteDataDirectory.getBlobContainer()).thenReturn(blobContainer); - - Mockito.doAnswer(invocation -> { - ActionListener completionListener = invocation.getArgument(1); - final CompletableFuture future = new CompletableFuture<>(); - future.complete(new InputStreamContainer(new ByteArrayInputStream(new byte[] { 42 }), 0, 1)); - completionListener.onResponse(new ReadContext(1, List.of(() -> future), "")); - return null; - }).when(blobContainer).readBlobAsync(any(), any()); - - CountDownLatch downloadLatch = new CountDownLatch(1); - ActionListener completionListener = new ActionListener<>() { - @Override - public void onResponse(String unused) { - downloadLatch.countDown(); - } - - @Override - public void onFailure(Exception e) {} - }; - Path path = createTempDir(); - DirectoryFileTransferTracker directoryFileTransferTracker = new DirectoryFileTransferTracker(); - long sourceFileLengthInBytes = remoteSegmentStoreDirectory.fileLength(filename); - remoteSegmentStoreDirectory.copyTo(filename, storeDirectory, path, directoryFileTransferTracker, completionListener); - assertTrue(downloadLatch.await(5000, TimeUnit.SECONDS)); - verify(blobContainer, times(1)).readBlobAsync(contains(filename), any()); - verify(storeDirectory, times(0)).copyFrom(any(), any(), any(), any()); 
- - // Verify stats are updated to DirectoryFileTransferTracker - assertEquals(sourceFileLengthInBytes, directoryFileTransferTracker.getTransferredBytesSucceeded()); - } - - public void testCopyFilesTo() throws Exception { - String filename = "_0.cfe"; - populateMetadata(); - remoteSegmentStoreDirectory.init(); - - Directory storeDirectory = mock(Directory.class); - CountDownLatch downloadLatch = new CountDownLatch(1); - ActionListener completionListener = new ActionListener<>() { - @Override - public void onResponse(String unused) { - downloadLatch.countDown(); - } - - @Override - public void onFailure(Exception e) {} - }; - Path path = createTempDir(); - remoteSegmentStoreDirectory.copyTo(filename, storeDirectory, path, new DirectoryFileTransferTracker(), completionListener); - assertTrue(downloadLatch.await(5000, TimeUnit.MILLISECONDS)); - verify(storeDirectory, times(1)).copyFrom(any(), eq(filename), eq(filename), eq(IOContext.DEFAULT)); - } - - public void testCopyFilesToEmptyPath() throws Exception { - String filename = "_0.cfe"; - populateMetadata(); - remoteSegmentStoreDirectory.init(); - - Directory storeDirectory = mock(Directory.class); - AsyncMultiStreamBlobContainer blobContainer = mock(AsyncMultiStreamBlobContainer.class); - when(remoteDataDirectory.getBlobContainer()).thenReturn(blobContainer); - - CountDownLatch downloadLatch = new CountDownLatch(1); - ActionListener completionListener = new ActionListener<>() { - @Override - public void onResponse(String unused) { - downloadLatch.countDown(); - } - - @Override - public void onFailure(Exception e) {} - }; - remoteSegmentStoreDirectory.copyTo(filename, storeDirectory, null, new DirectoryFileTransferTracker(), completionListener); - assertTrue(downloadLatch.await(5000, TimeUnit.MILLISECONDS)); - verify(storeDirectory, times(1)).copyFrom(any(), eq(filename), eq(filename), eq(IOContext.DEFAULT)); - } - - public void testCopyFilesToException() throws Exception { - String filename = "_0.cfe"; - populateMetadata(); - remoteSegmentStoreDirectory.init(); - - Directory storeDirectory = mock(Directory.class); - Mockito.doThrow(new IOException()) - .when(storeDirectory) - .copyFrom(any(Directory.class), anyString(), anyString(), any(IOContext.class)); - CountDownLatch downloadLatch = new CountDownLatch(1); - ActionListener completionListener = new ActionListener<>() { - @Override - public void onResponse(String unused) { - - } - - @Override - public void onFailure(Exception e) { - downloadLatch.countDown(); - } - }; - Path path = createTempDir(); - remoteSegmentStoreDirectory.copyTo(filename, storeDirectory, path, new DirectoryFileTransferTracker(), completionListener); - assertTrue(downloadLatch.await(5000, TimeUnit.MILLISECONDS)); - verify(storeDirectory, times(1)).copyFrom(any(), eq(filename), eq(filename), eq(IOContext.DEFAULT)); - } - public void testCopyFilesFromMultipartIOException() throws Exception { String filename = "_100.si"; AsyncMultiStreamBlobContainer blobContainer = mock(AsyncMultiStreamBlobContainer.class); @@ -689,8 +567,7 @@ public void testCopyFilesFromMultipartIOException() throws Exception { remoteMetadataDirectory, mdLockManager, threadPool, - indexShard.shardId(), - DefaultRecoverySettings.INSTANCE + indexShard.shardId() ); populateMetadata(); diff --git a/server/src/test/java/org/opensearch/index/store/RemoteStoreFileDownloaderTests.java b/server/src/test/java/org/opensearch/index/store/RemoteStoreFileDownloaderTests.java new file mode 100644 index 0000000000000..588d9e8bb13a2 --- /dev/null +++ 
b/server/src/test/java/org/opensearch/index/store/RemoteStoreFileDownloaderTests.java @@ -0,0 +1,119 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.store; + +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.IOContext; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.store.IndexOutput; +import org.apache.lucene.store.NIOFSDirectory; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; + +import java.io.EOFException; +import java.io.IOException; +import java.nio.file.NoSuchFileException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + +public class RemoteStoreFileDownloaderTests extends OpenSearchTestCase { + + private ThreadPool threadPool; + private Directory source; + private Directory destination; + private Directory secondDestination; + private RemoteStoreFileDownloader fileDownloader; + private Map files = new HashMap<>(); + + @Before + public void setup() throws IOException { + final int streamLimit = randomIntBetween(1, 20); + final RecoverySettings recoverySettings = new RecoverySettings( + Settings.builder().put("indices.recovery.max_concurrent_remote_store_streams", streamLimit).build(), + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ); + threadPool = new TestThreadPool(getTestName()); + source = new NIOFSDirectory(createTempDir()); + destination = new NIOFSDirectory(createTempDir()); + secondDestination = new NIOFSDirectory(createTempDir()); + for (int i = 0; i < 10; i++) { + final String filename = "file_" + i; + final int content = randomInt(); + try (IndexOutput output = source.createOutput(filename, IOContext.DEFAULT)) { + output.writeInt(content); + } + files.put(filename, content); + } + fileDownloader = new RemoteStoreFileDownloader( + ShardId.fromString("[RemoteStoreFileDownloaderTests][0]"), + threadPool, + recoverySettings + ); + } + + @After + public void stopThreadPool() throws Exception { + threadPool.shutdown(); + assertTrue(threadPool.awaitTermination(5, TimeUnit.SECONDS)); + } + + public void testDownload() throws IOException { + fileDownloader.download(source, destination, files.keySet()); + assertContent(files, destination); + } + + public void testDownloadWithSecondDestination() throws IOException { + fileDownloader.download(source, destination, secondDestination, files.keySet(), () -> {}); + assertContent(files, destination); + assertContent(files, secondDestination); + } + + public void testDownloadWithFileCompletionHandler() throws IOException { + final AtomicInteger counter = new AtomicInteger(0); + fileDownloader.download(source, destination, null, files.keySet(), counter::incrementAndGet); + assertContent(files, destination); + assertEquals(files.size(), counter.get()); + } + + public void testDownloadNonExistentFile() { + assertThrows(NoSuchFileException.class, () 
-> fileDownloader.download(source, destination, Set.of("not real"))); + } + + public void testDownloadExtraNonExistentFile() { + List filesWithExtra = new ArrayList<>(files.keySet()); + filesWithExtra.add("not real"); + assertThrows(NoSuchFileException.class, () -> fileDownloader.download(source, destination, filesWithExtra)); + } + + private static void assertContent(Map expected, Directory destination) throws IOException { + // Note that Lucene will randomly write extra files (see org.apache.lucene.tests.mockfile.ExtraFS) + // so we just need to check that all the expected files are present but not that _only_ the expected + // files are present + final Set actualFiles = Set.of(destination.listAll()); + for (String file : expected.keySet()) { + assertTrue(actualFiles.contains(file)); + try (IndexInput input = destination.openInput(file, IOContext.DEFAULT)) { + assertEquals(expected.get(file), Integer.valueOf(input.readInt())); + assertThrows(EOFException.class, input::readByte); + } + } + } +} diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 80731b378f369..97c5d23831965 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -2067,11 +2067,12 @@ public void onFailure(final Exception e) { emptyMap(), null, emptyMap(), - new RemoteSegmentStoreDirectoryFactory(() -> repositoriesService, threadPool, DefaultRecoverySettings.INSTANCE), + new RemoteSegmentStoreDirectoryFactory(() -> repositoriesService, threadPool), repositoriesServiceReference::get, fileCacheCleaner, null, - new RemoteStoreStatsTrackerFactory(clusterService, settings) + new RemoteStoreStatsTrackerFactory(clusterService, settings), + DefaultRecoverySettings.INSTANCE ); final RecoverySettings recoverySettings = new RecoverySettings(settings, clusterSettings); snapshotShardsService = new SnapshotShardsService( diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index 186c1c7e78f6b..ffa37817e8a03 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -702,7 +702,7 @@ protected IndexShard newShard( remoteStoreStatsTrackerFactory, () -> IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, "dummy-node", - null + DefaultRecoverySettings.INSTANCE ); indexShard.addShardFailureCallback(DEFAULT_SHARD_FAILURE_HANDLER); if (remoteStoreStatsTrackerFactory != null) { @@ -789,14 +789,7 @@ protected RemoteSegmentStoreDirectory createRemoteSegmentStoreDirectory(ShardId RemoteStoreLockManager remoteStoreLockManager = new RemoteStoreMetadataLockManager( new RemoteBufferedOutputDirectory(getBlobContainer(remoteShardPath.resolveIndex())) ); - return new RemoteSegmentStoreDirectory( - dataDirectory, - metadataDirectory, - remoteStoreLockManager, - threadPool, - shardId, - DefaultRecoverySettings.INSTANCE - ); + return new RemoteSegmentStoreDirectory(dataDirectory, metadataDirectory, remoteStoreLockManager, threadPool, shardId); } private RemoteDirectory newRemoteDirectory(Path f) throws IOException { From c0852f84f0647f78a45e71396d6370a39ba4922f Mon Sep 17 00:00:00 2001 From: Laurent Laborde Date: Wed, 11 Oct 2023 04:42:47 +0200 Subject: [PATCH 12/33] New javadoc and 
improvement of existing javadoc (#10064)

* Fixing javadoc error fixing javadoc error and warning javadoc for org.opensearch.core.indices.breaker javadoc for org.opensearch.core.index.shard.ShardId javadoc for org.opensearch.core.index.Index fixing a mishap in formatting rule fixing javadoc of org.opensearch.cli.Terminal Signed-off-by: Laurent Laborde
* removing unsupported @ImplNote Signed-off-by: Laurent Laborde
* Update libs/common/src/main/java/org/opensearch/common/collect/Iterators.java Co-authored-by: Andriy Redko Signed-off-by: Laurent Laborde
* Update libs/compress/src/main/java/org/opensearch/compress/ZstdCompressor.java Co-authored-by: Andriy Redko Signed-off-by: Laurent Laborde
* Apply suggestions from code review Co-authored-by: Andriy Redko Signed-off-by: Laurent Laborde
* Apply suggestions from code review Co-authored-by: Andriy Redko Signed-off-by: Laurent Laborde
* Apply suggestions from code review Co-authored-by: Andriy Redko Signed-off-by: Laurent Laborde
* Apply suggestions from code review Co-authored-by: Andriy Redko Signed-off-by: Laurent Laborde
* Update CollectionUtils.java Signed-off-by: Laurent Laborde
* fixing javadoc warnings Signed-off-by: Laurent Laborde
* remove useless p tag Signed-off-by: Laurent Laborde
* removing a p tag Co-authored-by: Andriy Redko Signed-off-by: Laurent Laborde
* proper capitalization Co-authored-by: Andriy Redko Signed-off-by: Laurent Laborde
* improvements of existing javadocs Signed-off-by: Laurent Laborde
* javadoc improvement Signed-off-by: Laurent Laborde
* javadoc improvement Signed-off-by: Laurent Laborde
* Fix formatting error after merge conflict resolution in a javadoc Signed-off-by: Laurent Laborde
* "code" tag ignore carriage return & "p", switching to a "pre" tag Signed-off-by: Laurent Laborde
* fixing an overzealous precommit check error. Signed-off-by: Laurent Laborde
* Update distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/SystemJvmOptions.java Co-authored-by: Andriy Redko Signed-off-by: Laurent Laborde
* Update server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceSortBuilderIT.java Co-authored-by: Andriy Redko Signed-off-by: Laurent Laborde
* Update server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesRequest.java Co-authored-by: Andriy Redko Signed-off-by: Laurent Laborde
* Update server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java Co-authored-by: Andriy Redko Signed-off-by: Laurent Laborde
* Update server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsRequest.java Co-authored-by: Andriy Redko Signed-off-by: Laurent Laborde
* Update server/src/main/java/org/opensearch/cluster/RestoreInProgress.java Co-authored-by: Andriy Redko Signed-off-by: Laurent Laborde
* Update test/framework/src/main/java/org/opensearch/test/AbstractDiffableWireSerializationTestCase.java Co-authored-by: Andriy Redko Signed-off-by: Laurent Laborde
* Update test/framework/src/main/java/org/opensearch/test/AbstractDiffableSerializationTestCase.java Co-authored-by: Andriy Redko Signed-off-by: Laurent Laborde
* further improvement of javadoc after review & suggestions Signed-off-by: Laurent Laborde
* fixing unreferenced @link in javadoc (precommit error) Signed-off-by: Laurent Laborde
* removing p tag from internal comment Signed-off-by: Laurent Laborde
* reverting javadoc tag in licence header Signed-off-by: Laurent Laborde
* Update server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractPercentilesAggregationBuilder.java Co-authored-by: Andriy Redko Signed-off-by: Laurent Laborde
* conflict resolution Signed-off-by: Laurent Laborde

---------

Signed-off-by: Laurent Laborde
Co-authored-by: Andriy Redko
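Several of the entries above concern the same mechanical fix: javadoc renders the contents of a <code> or <p> tag as flowed HTML, so line breaks inside a multi-line snippet are lost, whereas <pre> preserves them. An invented before/after illustration of that kind of change (the class and snippet are made up for the example):

    // Before: the line break between the two statements collapses when rendered.
    /**
     * Example usage:
     * <code>
     * Foo foo = new Foo();
     * foo.bar();
     * </code>
     */

    // After: <pre> keeps the snippet formatted as written.
    /**
     * Example usage:
     * <pre>
     * Foo foo = new Foo();
     * foo.bar();
     * </pre>
     */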
---
 .../org/opensearch/gradle/reaper/Reaper.java | 5 +-
 .../org/opensearch/gradle/BwcVersions.java | 8 +-
 .../gradle/LoggingOutputStream.java | 2 +-
 .../precommit/TestingConventionRule.java | 2 +-
 .../gradle/tar/SymbolicLinkPreservingTar.java | 2 +-
 .../gradle/test/rest/RestResourcesPlugin.java | 2 +-
 .../gradle/vagrant/VagrantMachine.java | 4 +-
 .../gradle/vagrant/VagrantShellTask.java | 2 +-
 .../gradle/pluginzip/PublishTests.java | 12 +--
 .../gradle/test/GradleThreadsFilter.java | 2 +-
 .../gradle/test/JUnit3MethodProvider.java | 2 +-
 .../benchmark/metrics/SampleRecorder.java | 2 +-
 .../org/opensearch/client/ClusterClient.java | 2 +-
 .../opensearch/client/RequestConverters.java | 6 +-
 .../org/opensearch/client/TimedRequest.java | 2 +-
 .../client/indices/CreateIndexRequest.java | 16 +--
 .../client/indices/PutMappingRequest.java | 8 +-
 .../opensearch/client/tasks/TaskGroup.java | 1 -
 .../client/AbstractRequestTestCase.java | 2 +-
 .../client/AbstractResponseTestCase.java | 2 +-
 .../client/BulkProcessorRetryIT.java | 2 +-
 .../IndicesClientDocumentationIT.java | 6 +-
 .../IngestClientDocumentationIT.java | 6 +-
 .../SnapshotClientDocumentationIT.java | 6 +-
 .../StoredScriptsDocumentationIT.java | 6 +-
 .../TasksClientDocumentationIT.java | 6 +-
 .../indices/RandomCreateIndexGenerator.java | 2 +-
 .../org/opensearch/client/RestClient.java | 2 +-
 .../RestClientDocumentation.java | 6 +-
 .../documentation/SnifferDocumentation.java | 6 +-
 .../tools/launchers/SystemJvmOptions.java | 18 ++--
 .../plugins/InstallPluginCommand.java | 5 +-
 .../plugins/ProgressInputStream.java | 2 +-
.../org/opensearch/upgrade/UpgradeTask.java | 2 +- .../missingdoclet/MissingDoclet.java | 2 +- .../main/java/org/opensearch/cli/Command.java | 2 +- .../java/org/opensearch/cli/Terminal.java | 2 +- .../org/opensearch/bootstrap/JarHell.java | 4 +- .../java/org/opensearch/common/Explicit.java | 2 +- .../opensearch/common/LocalTimeOffset.java | 2 +- .../java/org/opensearch/common/SetOnce.java | 2 +- .../org/opensearch/common/collect/Tuple.java | 1 - .../org/opensearch/common/io/PathUtils.java | 4 +- .../common/network/InetAddresses.java | 4 +- .../org/opensearch/common/unit/TimeValue.java | 8 +- .../org/opensearch/common/util/BitMixer.java | 6 +- .../src/main/java/org/opensearch/Build.java | 2 +- .../java/org/opensearch/LegacyESVersion.java | 2 +- .../org/opensearch/OpenSearchException.java | 8 +- .../core/action/ActionListener.java | 6 +- .../org/opensearch/core/common/Strings.java | 2 +- .../common/bytes/CompositeBytesReference.java | 2 +- .../common/io/stream/BytesStreamInput.java | 2 +- .../io/stream/NamedWriteableRegistry.java | 2 +- .../core/common/io/stream/StreamInput.java | 4 +- .../core/common/io/stream/StreamOutput.java | 2 +- .../core/common/settings/SecureString.java | 4 +- .../core/common/unit/ByteSizeUnit.java | 2 +- .../opensearch/core/compress/Compressor.java | 2 +- .../core/compress/CompressorRegistry.java | 4 +- .../core/compress/spi/CompressorProvider.java | 2 +- .../java/org/opensearch/core/index/Index.java | 4 +- .../opensearch/core/index/shard/ShardId.java | 2 +- .../core/xcontent/AbstractObjectParser.java | 14 +-- .../core/xcontent/MapXContentParser.java | 2 +- .../core/xcontent/XContentBuilder.java | 2 +- .../xcontent/XContentBuilderExtension.java | 6 +- .../core/xcontent/XContentParser.java | 6 +- .../core/xcontent/XContentParserUtils.java | 4 +- .../core/xcontent/XContentSubParser.java | 2 +- .../org/opensearch/dissect/DissectParser.java | 39 ++++---- .../java/org/opensearch/geometry/Circle.java | 1 + .../opensearch/geometry/utils/BitUtil.java | 4 +- .../opensearch/geometry/utils/Geohash.java | 6 +- .../main/java/org/opensearch/grok/Grok.java | 2 +- .../org/opensearch/grok/MatcherWatchdog.java | 2 +- .../org/opensearch/nio/ChannelContext.java | 4 +- .../java/org/opensearch/nio/NioSelector.java | 4 +- .../opensearch/nio/SocketChannelContext.java | 2 +- .../opensearch/telemetry/tracing/Tracer.java | 2 +- .../telemetry/tracing/http/HttpTracer.java | 2 +- .../common/xcontent/XContentParserTests.java | 2 +- .../matrix/stats/RunningStats.java | 4 +- .../common/CommonAnalysisModulePlugin.java | 2 +- .../KeywordMarkerTokenFilterFactory.java | 4 +- .../analysis/common/SnowballAnalyzer.java | 2 +- .../bucket/composite/GeoTileValuesSource.java | 2 +- .../geogrid/cells/BoundedCellValues.java | 2 +- .../ingest/common/FailProcessorException.java | 2 +- .../ingest/common/ForEachProcessor.java | 4 +- .../expression/ExpressionScriptEngine.java | 2 +- .../script/mustache/MustacheScriptEngine.java | 2 +- .../mustache/MustacheScriptEngineTests.java | 2 +- .../opensearch/painless/spi/Allowlist.java | 2 +- .../painless/spi/AllowlistClass.java | 4 +- .../painless/spi/AllowlistLoader.java | 12 +-- .../painless/spi/AllowlistMethod.java | 2 +- .../org/opensearch/painless/Compiler.java | 4 +- .../lookup/PainlessLookupUtility.java | 6 +- .../phase/PainlessSemanticAnalysisPhase.java | 8 +- .../painless/symbol/SemanticScope.java | 2 +- .../mapper/SearchAsYouTypeFieldMapper.java | 2 +- .../join/query/HasChildQueryBuilder.java | 2 +- .../rankeval/TransportRankEvalAction.java | 4 +- 
.../DiscountedCumulativeGainTests.java | 98 +++++++++---------- .../AbstractAsyncBulkByScrollAction.java | 2 +- .../BulkByScrollParallelizationHelper.java | 12 +-- .../netty4/Netty4HttpRequestSizeLimitIT.java | 2 +- .../http/netty4/Netty4HttpRequest.java | 2 +- .../transport/CopyBytesSocketChannel.java | 2 +- .../analysis/phonetic/HaasePhonetik.java | 4 +- .../analysis/phonetic/KoelnerPhonetik.java | 6 +- .../index/analysis/phonetic/Nysiis.java | 2 +- .../discovery/gce/GceDiscoveryTests.java | 2 - .../BasePerFieldCorrelationVectorsFormat.java | 4 +- .../query/CorrelationQueryBuilderTests.java | 1 + .../customsuggester/CustomSuggestion.java | 3 +- .../ExampleAllowlistedClass.java | 2 +- .../ExampleAllowlistedInstance.java | 2 +- .../ExamplePainlessAnnotation.java | 2 +- .../ExampleStaticMethodClass.java | 2 +- .../AnnotatedTextFieldMapper.java | 4 +- .../azure/AzureBlobStoreRepositoryTests.java | 2 +- .../azure/AzureStorageService.java | 8 +- .../azure/AzureStorageServiceTests.java | 3 +- ...eCloudStorageBlobStoreRepositoryTests.java | 2 +- .../hdfs/HdfsSecurityContext.java | 2 +- .../hdfs/HdfsClientThreadLeakFilter.java | 2 +- .../s3/S3BlobStoreRepositoryTests.java | 2 +- .../repositories/s3/S3ClientSettings.java | 2 +- .../s3/S3RetryingInputStream.java | 2 +- .../index/store/SmbMMapDirectoryTests.java | 6 +- .../index/store/SmbNIOFSDirectoryTests.java | 6 +- .../opensearch/http/nio/NioHttpRequest.java | 2 +- .../upgrades/QueryBuilderBWCIT.java | 2 +- .../bootstrap/SpawnerNoBootstrapTests.java | 2 +- .../packaging/test/PackagingTestCase.java | 2 +- .../packaging/util/FileMatcher.java | 2 +- .../packaging/util/Installation.java | 2 +- .../org/opensearch/upgrades/IndexingIT.java | 13 +-- .../opensearch/action/admin/HotThreadsIT.java | 4 +- .../cluster/node/tasks/AbstractTasksIT.java | 2 +- .../node/tasks/ConcurrentSearchTasksIT.java | 4 +- .../repositories/RepositoryBlocksIT.java | 2 +- .../cluster/snapshots/SnapshotBlocksIT.java | 2 +- .../action/bulk/BulkProcessorRetryIT.java | 2 +- .../action/ingest/AsyncIngestProcessorIT.java | 2 +- .../action/search/TransportSearchIT.java | 4 +- .../discovery/ClusterDisruptionIT.java | 2 +- .../SegmentReplicationAllocationIT.java | 2 +- .../indices/state/CloseIndexIT.java | 2 +- .../recovery/FullRollingRestartIT.java | 10 +- .../aggregations/bucket/DateHistogramIT.java | 2 +- .../aggregations/pipeline/MaxBucketIT.java | 4 +- .../aggregations/pipeline/MovAvgIT.java | 2 +- .../search/pit/DeletePitMultiNodeIT.java | 34 +++---- .../search/searchafter/SearchAfterIT.java | 4 +- .../search/sort/GeoDistanceSortBuilderIT.java | 50 +++++----- .../SharedClusterSnapshotRestoreIT.java | 2 +- .../ConcurrentSeqNoVersioningIT.java | 2 +- .../apache/lucene/index/OneMergeHelper.java | 4 +- .../queryparser/classic/XQueryParser.java | 2 +- .../grouping/CollapsingTopDocsCollector.java | 2 +- .../BoundedBreakIteratorScanner.java | 6 +- .../apache/lucene/util/packed/XPacked64.java | 4 +- .../util/packed/XPacked64SingleBlock.java | 4 +- .../lucene/util/packed/XPackedInts.java | 4 +- .../org/opensearch/action/AliasesRequest.java | 2 +- .../opensearch/action/DocWriteRequest.java | 2 +- .../opensearch/action/DocWriteResponse.java | 2 +- .../action/TaskOperationFailure.java | 2 +- .../ClusterAllocationExplainRequest.java | 2 +- .../tasks/get/TransportGetTaskAction.java | 2 +- .../TransportCleanupRepositoryAction.java | 2 +- .../get/GetRepositoriesRequest.java | 2 +- .../create/CreateSnapshotRequest.java | 4 +- .../restore/RestoreSnapshotRequest.java | 2 +- 
.../alias/get/BaseAliasesRequestBuilder.java | 2 +- .../indices/create/CreateIndexRequest.java | 10 +- .../list/ListDanglingIndicesResponse.java | 2 +- .../mapping/get/GetFieldMappingsRequest.java | 2 +- .../mapping/get/GetFieldMappingsResponse.java | 2 +- .../mapping/put/PutMappingRequest.java | 2 +- .../SegmentReplicationStatsRequest.java | 2 +- .../indices/rollover/RolloverRequest.java | 2 +- .../indices/rollover/RolloverResponse.java | 2 +- .../opensearch/action/bulk/BackoffPolicy.java | 6 +- .../action/bulk/BulkItemResponse.java | 2 +- .../opensearch/action/bulk/BulkProcessor.java | 2 +- .../opensearch/action/bulk/BulkRequest.java | 10 +- .../action/delete/DeleteRequest.java | 2 +- .../action/delete/TransportDeleteAction.java | 2 +- .../opensearch/action/index/IndexRequest.java | 10 +- .../action/index/TransportIndexAction.java | 2 +- .../action/ingest/IngestActionForwarder.java | 2 +- .../action/search/CreatePitController.java | 20 ++-- .../action/search/SearchPhaseController.java | 4 +- .../action/search/SearchRequest.java | 4 +- .../action/search/SearchRequestBuilder.java | 2 +- .../action/search/SearchResponseSections.java | 2 +- .../search/TransportDeletePitAction.java | 2 +- .../action/support/TransportAction.java | 2 +- .../support/nodes/BaseNodesRequest.java | 4 +- .../replication/ReplicationOperation.java | 2 +- .../TransportReplicationAction.java | 4 +- .../replication/TransportWriteAction.java | 4 +- .../single/shard/SingleShardRequest.java | 2 +- .../support/tasks/BaseTasksRequest.java | 2 +- .../action/update/UpdateRequest.java | 2 +- .../opensearch/bootstrap/BootstrapChecks.java | 2 +- .../bootstrap/JNAKernel32Library.java | 24 ++--- .../org/opensearch/bootstrap/OpenSearch.java | 4 +- .../AckedClusterStateTaskListener.java | 2 +- .../cluster/ClusterStateTaskExecutor.java | 4 +- .../org/opensearch/cluster/DiffableUtils.java | 8 +- .../cluster/InternalClusterInfoService.java | 2 +- .../opensearch/cluster/RestoreInProgress.java | 2 +- .../coordination/ClusterStatePublisher.java | 4 +- .../coordination/JoinTaskExecutor.java | 4 +- .../cluster/coordination/Reconfigurator.java | 2 +- .../cluster/health/ClusterShardHealth.java | 4 +- .../cluster/metadata/IndexGraveyard.java | 4 +- .../cluster/metadata/IndexMetadata.java | 4 +- .../metadata/IndexTemplateMetadata.java | 4 +- .../opensearch/cluster/metadata/Metadata.java | 2 +- .../metadata/MetadataCreateIndexService.java | 6 +- .../metadata/MetadataIndexStateService.java | 4 +- .../MetadataIndexTemplateService.java | 2 +- .../cluster/node/DiscoveryNodes.java | 12 +-- .../routing/IndexShardRoutingTable.java | 2 +- .../cluster/routing/RecoverySource.java | 2 +- .../cluster/routing/RoutingNodes.java | 14 +-- .../routing/ShardMovementStrategy.java | 2 +- .../cluster/routing/UnassignedInfo.java | 2 +- .../allocation/AwarenessReplicaBalance.java | 2 +- .../routing/allocation/ConstraintTypes.java | 4 +- .../allocation/IndexMetadataUpdater.java | 4 +- .../allocator/BalancedShardsAllocator.java | 2 +- .../allocator/LocalShardsBalancer.java | 2 +- .../allocation/allocator/ShardsAllocator.java | 4 +- .../allocation/command/AllocationCommand.java | 2 +- .../allocation/decider/AllocationDecider.java | 2 +- .../decider/DiskThresholdDecider.java | 12 +-- .../decider/SameShardAllocationDecider.java | 2 +- .../decider/ThrottlingAllocationDecider.java | 2 +- .../service/ClusterApplierService.java | 2 +- .../service/ClusterManagerTaskThrottler.java | 8 +- .../org/opensearch/common/Randomness.java | 2 +- .../common/blobstore/BlobContainer.java | 6 
+- .../common/blobstore/fs/FsBlobContainer.java | 4 +- .../breaker/ChildMemoryCircuitBreaker.java | 2 +- .../org/opensearch/common/cache/Cache.java | 2 +- .../common/collect/CopyOnWriteHashMap.java | 8 +- .../opensearch/common/geo/GeoShapeType.java | 10 +- .../org/opensearch/common/geo/GeoUtils.java | 4 +- .../opensearch/common/geo/GeometryFormat.java | 2 +- .../common/geo/builders/PolygonBuilder.java | 10 +- .../common/geo/parsers/GeoJsonParser.java | 2 +- .../common/geo/parsers/GeoWKTParser.java | 2 +- .../opensearch/common/hash/MurmurHash3.java | 2 +- .../opensearch/common/inject/Initializer.java | 5 +- .../common/io/stream/Streamables.java | 4 +- .../common/joda/JodaDateMathParser.java | 2 +- .../JsonThrowablePatternConverter.java | 2 +- .../common/lucene/ShardCoreKeyMap.java | 2 +- .../opensearch/common/rounding/Rounding.java | 2 +- .../settings/AbstractScopedSettings.java | 4 +- .../common/settings/KeyStoreWrapper.java | 8 +- .../common/settings/SecureSetting.java | 6 +- .../opensearch/common/settings/Settings.java | 2 +- .../common/time/DateFormatters.java | 4 +- .../common/time/DateMathParser.java | 6 +- .../org/opensearch/common/time/DateUtils.java | 2 +- .../common/time/DateUtilsRounding.java | 6 +- .../common/time/JavaDateFormatter.java | 2 +- .../common/time/JavaDateMathParser.java | 2 +- .../common/util/BinarySearcher.java | 10 +- .../common/util/CancellableThreads.java | 2 +- .../opensearch/common/util/CuckooFilter.java | 44 ++++----- .../util/SetBackedScalingCuckooFilter.java | 4 +- .../opensearch/common/util/TokenBucket.java | 2 +- .../opensearch/common/util/URIPattern.java | 4 +- .../util/concurrent/ListenableFuture.java | 2 +- ...ioritizedOpenSearchThreadPoolExecutor.java | 5 +- .../common/util/concurrent/ThreadContext.java | 8 +- .../xcontent/LoggingDeprecationHandler.java | 2 +- .../common/xcontent/XContentHelper.java | 2 +- .../xcontent/support/XContentMapValues.java | 13 ++- .../discovery/FileBasedSeedHostsProvider.java | 4 +- .../SettingsBasedSeedHostsProvider.java | 2 +- .../java/org/opensearch/env/Environment.java | 4 +- .../org/opensearch/env/NodeEnvironment.java | 4 +- .../extensions/ExtensionReader.java | 2 +- .../gateway/BaseGatewayShardAllocator.java | 2 +- .../gateway/DanglingIndicesState.java | 2 +- .../opensearch/gateway/GatewayMetaState.java | 2 +- .../gateway/PersistedClusterStateService.java | 8 +- .../gateway/PrimaryShardAllocator.java | 12 +-- .../gateway/ReplicaShardAllocator.java | 2 +- .../remote/RemoteClusterStateService.java | 9 +- .../java/org/opensearch/http/CorsHandler.java | 2 +- .../identity/noop/NoopIdentityPlugin.java | 2 +- .../opensearch/identity/noop/NoopSubject.java | 2 +- .../identity/tokens/RestTokenExtractor.java | 2 +- .../org/opensearch/index/IndexService.java | 4 +- .../org/opensearch/index/IndexSettings.java | 4 +- .../org/opensearch/index/IndexSortConfig.java | 2 +- .../index/MergeSchedulerConfig.java | 4 +- .../index/ShardIndexingPressure.java | 2 +- .../ShardIndexingPressureMemoryManager.java | 8 +- .../index/ShardIndexingPressureStore.java | 10 +- .../index/ShardIndexingPressureTracker.java | 6 +- .../org/opensearch/index/VersionType.java | 4 +- .../index/analysis/AnalysisRegistry.java | 2 +- .../NormalizingCharFilterFactory.java | 2 +- .../NormalizingTokenFilterFactory.java | 2 +- .../PreBuiltAnalyzerProviderFactory.java | 2 +- .../analysis/ShingleTokenFilterFactory.java | 10 +- .../index/analysis/TokenFilterFactory.java | 4 +- .../index/engine/InternalEngine.java | 2 +- .../index/engine/NRTReplicationEngine.java | 2 +- 
.../index/engine/ReplicaFileTracker.java | 2 +- .../index/fielddata/MultiGeoPointValues.java | 2 +- .../index/fielddata/ScriptDocValues.java | 12 +-- .../SortedSetOrdinalsIndexFieldData.java | 6 +- .../mapper/AbstractGeometryFieldMapper.java | 4 +- .../index/mapper/ArraySourceValueFetcher.java | 2 +- .../index/mapper/CompletionFieldMapper.java | 4 +- .../index/mapper/CustomDocValuesField.java | 3 +- .../index/mapper/DocumentMapperParser.java | 3 +- .../index/mapper/DynamicKeyFieldMapper.java | 2 +- .../mapper/DynamicKeyFieldTypeLookup.java | 4 +- .../index/mapper/FieldAliasMapper.java | 2 +- .../opensearch/index/mapper/FieldMapper.java | 2 +- .../index/mapper/FieldTypeLookup.java | 4 +- .../index/mapper/FlatObjectFieldMapper.java | 6 +- .../index/mapper/GeoPointFieldMapper.java | 2 +- .../index/mapper/MappedFieldType.java | 4 +- .../org/opensearch/index/mapper/Mapper.java | 2 +- .../index/mapper/MappingLookup.java | 2 +- .../index/mapper/MetadataFieldMapper.java | 2 +- .../index/mapper/ParametrizedFieldMapper.java | 8 +- .../index/mapper/SeqNoFieldMapper.java | 4 +- .../index/mapper/SourceValueFetcher.java | 2 +- .../opensearch/index/mapper/ValueFetcher.java | 4 +- .../index/query/AbstractQueryBuilder.java | 2 +- .../query/GeoBoundingBoxQueryBuilder.java | 2 +- .../index/query/GeoShapeQueryBuilder.java | 2 +- .../index/query/GeoValidationMethod.java | 2 +- .../index/query/IntervalsSourceProvider.java | 2 +- .../query/MatchBoolPrefixQueryBuilder.java | 2 +- .../index/query/MatchQueryBuilder.java | 2 +- .../index/query/MoreLikeThisQueryBuilder.java | 2 +- .../index/query/MultiMatchQueryBuilder.java | 4 +- .../index/query/QueryShardContext.java | 2 +- .../index/query/QueryStringQueryBuilder.java | 2 +- .../index/query/RangeQueryBuilder.java | 6 +- .../index/query/SearchIndexNameMatcher.java | 2 +- .../index/query/SimpleQueryStringBuilder.java | 2 +- .../index/reindex/BulkByScrollTask.java | 6 +- .../index/reindex/DeleteByQueryRequest.java | 2 +- .../opensearch/index/search/MatchQuery.java | 6 +- .../index/seqno/LocalCheckpointTracker.java | 4 +- .../index/seqno/ReplicationTracker.java | 10 +- .../index/shard/IndexEventListener.java | 2 +- .../opensearch/index/shard/IndexShard.java | 10 +- .../index/shard/RefreshListeners.java | 4 +- .../index/similarity/SimilarityProvider.java | 24 ++--- .../RemoteStoreShardShallowCopySnapshot.java | 8 +- .../org/opensearch/index/store/Store.java | 15 +-- .../remote/file/OnDemandBlockIndexInput.java | 6 +- .../DefaultTranslogDeletionPolicy.java | 2 +- .../opensearch/index/translog/Translog.java | 4 +- .../index/translog/TranslogWriter.java | 2 +- .../opensearch/indices/IndicesService.java | 6 +- .../indices/ShardLimitValidator.java | 2 +- .../analysis/PreBuiltCacheFactory.java | 10 +- .../recovery/RecoveryTargetHandler.java | 2 +- .../recovery/RemoteRecoveryTargetHandler.java | 2 +- .../OngoingSegmentReplications.java | 16 +-- .../common/ReplicationRequestTracker.java | 3 +- .../opensearch/ingest/ConfigurationUtils.java | 22 ++--- .../org/opensearch/ingest/IngestService.java | 2 +- .../java/org/opensearch/ingest/Pipeline.java | 4 +- .../java/org/opensearch/ingest/Processor.java | 4 +- .../org/opensearch/ingest/ValueSource.java | 2 +- .../org/opensearch/monitor/os/OsProbe.java | 10 +- .../NodePersistentTasksExecutor.java | 2 +- .../PersistentTasksClusterService.java | 2 +- .../plugins/CircuitBreakerPlugin.java | 6 +- .../org/opensearch/plugins/ClusterPlugin.java | 4 +- .../opensearch/plugins/DiscoveryPlugin.java | 9 +- 
.../org/opensearch/plugins/EnginePlugin.java | 2 +- .../opensearch/plugins/ExtensiblePlugin.java | 4 +- .../opensearch/plugins/IdentityPlugin.java | 8 +- .../org/opensearch/plugins/IngestPlugin.java | 2 +- .../java/org/opensearch/plugins/Plugin.java | 2 +- .../opensearch/plugins/PluginsService.java | 2 +- .../opensearch/plugins/RepositoryPlugin.java | 10 +- .../plugins/SearchPipelinePlugin.java | 6 +- .../org/opensearch/plugins/SearchPlugin.java | 4 +- .../org/opensearch/repositories/IndexId.java | 2 +- .../blobstore/BlobStoreRepository.java | 2 +- .../org/opensearch/rest/RestController.java | 2 +- .../java/org/opensearch/script/Script.java | 18 ++-- .../org/opensearch/script/ScriptCache.java | 2 +- .../org/opensearch/script/ScriptContext.java | 2 +- .../script/ScriptLanguagesInfo.java | 4 +- .../org/opensearch/script/ScriptMetadata.java | 6 +- .../opensearch/script/StoredScriptSource.java | 4 +- .../org/opensearch/search/MultiValueMode.java | 32 +++--- .../opensearch/search/SearchExtBuilder.java | 4 +- .../java/org/opensearch/search/SearchHit.java | 2 +- .../org/opensearch/search/SearchModule.java | 2 +- .../search/aggregations/AggregatorBase.java | 2 +- .../aggregations/InternalAggregations.java | 2 +- .../bucket/BucketsAggregator.java | 4 +- .../aggregations/bucket/GeoTileUtils.java | 2 +- .../MergingBucketsDeferringCollector.java | 15 +-- .../DateHistogramValuesSourceBuilder.java | 4 +- .../bucket/composite/InternalComposite.java | 12 +-- .../bucket/composite/ParsedComposite.java | 10 +- .../bucket/composite/SortedDocsProducer.java | 2 +- .../filter/FilterAggregatorFactory.java | 2 +- .../filter/FiltersAggregatorFactory.java | 2 +- .../AutoDateHistogramAggregator.java | 2 +- .../DateHistogramAggregationBuilder.java | 4 +- .../histogram/DateHistogramAggregator.java | 4 +- .../histogram/DateHistogramInterval.java | 2 +- .../histogram/DateIntervalConsumer.java | 2 +- .../bucket/histogram/DateIntervalWrapper.java | 8 +- .../bucket/histogram/DoubleBounds.java | 2 +- .../InternalVariableWidthHistogram.java | 4 +- .../bucket/histogram/LongBounds.java | 2 +- .../VariableWidthHistogramAggregator.java | 10 +- .../bucket/sampler/SamplerAggregator.java | 2 +- .../bucket/terms/InternalMultiTerms.java | 4 +- .../bucket/terms/InternalTerms.java | 10 +- .../bucket/terms/MultiTermsAggregator.java | 8 +- .../terms/RareTermsAggregationBuilder.java | 2 +- .../bucket/terms/TermsAggregator.java | 8 +- .../metrics/AbstractHyperLogLog.java | 6 +- .../metrics/AbstractHyperLogLogPlusPlus.java | 1 - .../metrics/AbstractLinearCounting.java | 8 +- ...AbstractPercentilesAggregationBuilder.java | 17 ++-- .../metrics/HyperLogLogPlusPlus.java | 8 +- .../metrics/HyperLogLogPlusPlusSparse.java | 2 +- .../metrics/ValueCountAggregator.java | 2 +- .../aggregations/pipeline/BucketHelpers.java | 2 +- .../pipeline/MovFnPipelineAggregator.java | 22 ++--- .../pipeline/MovingFunctions.java | 22 ++--- .../pipeline/SimulatedAnealingMinimizer.java | 6 +- .../support/AggregationInspectionHelper.java | 4 +- .../support/CoreValuesSourceType.java | 2 +- .../MultiValuesSourceAggregationBuilder.java | 2 +- .../ValuesSourceAggregationBuilder.java | 2 +- .../support/ValuesSourceType.java | 6 +- .../SearchBackpressureService.java | 4 +- .../settings/SearchBackpressureSettings.java | 6 +- .../search/fetch/FetchSubPhase.java | 4 +- .../fetch/subphase/FetchDocValuesPhase.java | 2 +- .../fetch/subphase/InnerHitsContext.java | 4 +- .../search/internal/ContextIndexSearcher.java | 2 +- .../search/internal/SearchContext.java | 2 +- 
.../search/lookup/SourceLookup.java | 2 +- .../pipeline/PipelineConfiguration.java | 2 +- .../opensearch/search/pipeline/Processor.java | 2 +- .../search/pipeline/ProcessorInfo.java | 2 +- .../profile/AbstractInternalProfileTree.java | 4 +- .../query/InternalProfileCollector.java | 13 +-- .../search/profile/query/QueryProfiler.java | 2 +- .../search/query/QuerySearchResult.java | 2 +- .../opensearch/search/rescore/Rescorer.java | 2 +- .../search/slice/DocValuesSliceQuery.java | 2 +- .../search/slice/TermsSliceQuery.java | 2 +- .../opensearch/search/suggest/Suggest.java | 2 +- .../completion/CompletionSuggestion.java | 4 +- .../suggest/completion/FuzzyOptions.java | 2 +- .../TopSuggestGroupDocsCollector.java | 4 +- .../completion/context/ContextMapping.java | 2 +- .../completion/context/ContextMappings.java | 10 +- .../completion/context/GeoContextMapping.java | 2 +- .../phrase/DirectCandidateGenerator.java | 14 +-- .../suggest/phrase/LinearInterpolation.java | 2 +- .../phrase/PhraseSuggestionBuilder.java | 2 +- .../snapshots/SnapshotsService.java | 8 +- .../main/java/org/opensearch/tasks/Task.java | 4 +- .../opensearch/tasks/TaskCancellation.java | 4 +- .../org/opensearch/threadpool/Scheduler.java | 4 +- .../threadpool/TaskAwareRunnable.java | 2 +- .../org/opensearch/threadpool/ThreadPool.java | 10 +- .../CompressibleBytesOutputStream.java | 4 +- .../transport/RemoteClusterConnection.java | 6 +- .../transport/TransportService.java | 8 +- .../org/opensearch/watcher/FileWatcher.java | 2 +- .../watcher/ResourceWatcherService.java | 2 +- .../opensearch/OpenSearchExceptionTests.java | 2 +- .../search/CreatePitControllerTests.java | 12 +-- .../TransportReplicationActionTests.java | 2 +- ...ReplicationAllPermitsAcquisitionTests.java | 2 +- .../allocation/AllocationPriorityTests.java | 2 +- .../allocation/BalanceConfigurationTests.java | 14 +-- .../allocation/CatAllocationTestCase.java | 2 +- ...dexShardConstraintDeciderOverlapTests.java | 2 +- .../allocation/IndexShardHotSpotTests.java | 2 +- .../RemoteShardsRebalanceShardsTests.java | 4 +- .../org/opensearch/common/RoundingTests.java | 2 +- .../common/geo/GeoJsonShapeParserTests.java | 6 +- .../common/geo/GeometryIndexerTests.java | 2 +- .../joda/JavaJodaTimeDuellingTests.java | 4 +- .../rounding/TimeZoneRoundingTests.java | 2 +- .../index/mapper/NestedObjectMapperTests.java | 4 +- .../index/query/InnerHitBuilderTests.java | 2 +- .../search/CreatePitSingleNodeTests.java | 14 +-- .../bucket/DateScriptMocksPlugin.java | 2 +- ...egacyIntervalCompositeAggBuilderTests.java | 2 +- .../metrics/MetricAggScriptPlugin.java | 2 +- .../pipeline/AvgBucketAggregatorTests.java | 6 +- .../pipeline/SearchPipelineInfoTests.java | 2 +- .../action/support/ActionTestUtils.java | 2 +- .../org/opensearch/cli/CommandTestCase.java | 2 +- ...archAllocationWithConstraintsTestCase.java | 44 ++++----- .../coordination/LinearizabilityChecker.java | 10 +- .../common/logging/JsonLogsIntegTestCase.java | 4 +- .../index/MockEngineFactoryPlugin.java | 2 +- .../index/RandomCreateIndexGenerator.java | 2 +- .../index/mapper/FieldMapperTestCase.java | 4 +- .../index/mapper/FieldMapperTestCase2.java | 4 +- .../index/shard/IndexShardTestCase.java | 2 +- .../mockito/plugin/PriviledgedMockMaker.java | 2 +- ...chMockAPIBasedRepositoryIntegTestCase.java | 6 +- .../opensearch/script/MockScriptEngine.java | 6 +- .../org/opensearch/script/ScoreAccessor.java | 2 +- .../aggregations/AggregatorTestCase.java | 10 +- ...AbstractDiffableSerializationTestCase.java | 4 +- 
...ractDiffableWireSerializationTestCase.java | 4 +- .../test/AbstractQueryTestCase.java | 10 +- .../opensearch/test/AbstractWireTestCase.java | 4 +- .../opensearch/test/MockKeywordPlugin.java | 2 +- .../test/OpenSearchIntegTestCase.java | 12 +-- .../test/OpenSearchTokenStreamTestCase.java | 6 +- .../java/org/opensearch/test/TestCluster.java | 2 +- .../opensearch/test/XContentTestUtils.java | 12 +-- .../opensearch/test/client/NoOpClient.java | 2 +- .../test/client/NoOpNodeClient.java | 2 +- .../test/disruption/LongGCDisruption.java | 2 +- .../test/disruption/NetworkDisruption.java | 2 +- .../test/gateway/TestGatewayAllocator.java | 6 +- .../junit/annotations/TestIssueLogging.java | 2 +- .../test/junit/annotations/TestLogging.java | 2 +- .../test/junit/listeners/LoggingListener.java | 2 +- .../test/rest/RestActionTestCase.java | 2 +- .../yaml/DenylistedPathPatternMatcher.java | 2 +- .../opensearch/test/rest/yaml/Features.java | 2 +- .../OpenSearchClientYamlSuiteTestCase.java | 6 +- .../test/rest/yaml/section/DoSection.java | 2 +- .../section/GreaterThanEqualToAssertion.java | 2 +- .../rest/yaml/section/IsFalseAssertion.java | 2 +- .../rest/yaml/section/IsTrueAssertion.java | 2 +- .../rest/yaml/section/LessThanAssertion.java | 2 +- .../section/LessThanOrEqualToAssertion.java | 2 +- .../rest/yaml/section/MatchAssertion.java | 2 +- .../test/rest/yaml/section/SetSection.java | 2 +- 563 files changed, 1304 insertions(+), 1319 deletions(-) diff --git a/buildSrc/reaper/src/main/java/org/opensearch/gradle/reaper/Reaper.java b/buildSrc/reaper/src/main/java/org/opensearch/gradle/reaper/Reaper.java index c5b4de157c75c..662510fbbf61c 100644 --- a/buildSrc/reaper/src/main/java/org/opensearch/gradle/reaper/Reaper.java +++ b/buildSrc/reaper/src/main/java/org/opensearch/gradle/reaper/Reaper.java @@ -45,17 +45,16 @@ /** * A standalone process that will reap external services after a build dies. - * *
<p>
Input
<p>
* Since how to reap a given service is platform and service dependent, this tool * operates on system commands to execute. It takes a single argument, a directory * that will contain files with reaping commands. Each line in each file will be * executed with {@link Runtime#exec(String)}. - * + *
<p>
* The main method will wait indefinitely on the parent process (Gradle) by * reading from stdin. When Gradle shuts down, whether normally or abruptly, the * pipe will be broken and read will return. - * + *
<p>
* The reaper will then iterate over the files in the configured directory, * and execute the given commands. If any commands fail, a failure message is * written to stderr. Otherwise, the input file will be deleted. If no inputs diff --git a/buildSrc/src/main/java/org/opensearch/gradle/BwcVersions.java b/buildSrc/src/main/java/org/opensearch/gradle/BwcVersions.java index cddd03ccc2019..4d45640b75e3d 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/BwcVersions.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/BwcVersions.java @@ -52,15 +52,15 @@ /** * A container for opensearch supported version information used in BWC testing. - * + *
<p>
* Parse the Java source file containing the versions declarations and use the known rules to figure out which are all * the version the current one is wire and index compatible with. * On top of this, figure out which of these are unreleased and provide the branch they can be built from. - * + *
<p>
* Note that in this context, currentVersion is the unreleased version this build operates on. * At any point in time there will surely be four such unreleased versions being worked on, * thus currentVersion will be one of these. - * + *
<p>
* Considering: *

*
M, M > 0
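As rough intuition for the compatibility rules described in the javadoc above, a toy model in plain Java (the type name and the simplified rule here are illustrative assumptions, not the actual logic of BwcVersions or the server's Version class):

import java.util.List;

// Toy stand-in for the server's Version class; illustrative only.
final class VersionSketch {
    final int major, minor, revision;

    VersionSketch(int major, int minor, int revision) {
        this.major = major;
        this.minor = minor;
        this.revision = revision;
    }

    // Simplified rule: wire-compatible within the same major and with the
    // previous major. The real rule is narrower about which prior-major
    // minors qualify; this sketch deliberately ignores that detail.
    boolean wireCompatibleWith(VersionSketch other) {
        return Math.abs(other.major - major) <= 1;
    }

    public static void main(String[] args) {
        VersionSketch current = new VersionSketch(3, 0, 0);
        for (VersionSketch v : List.of(new VersionSketch(2, 11, 1), new VersionSketch(1, 3, 12))) {
            System.out.println(v.major + "." + v.minor + "." + v.revision + " -> " + current.wireCompatibleWith(v));
        }
    }
}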
@@ -84,7 +84,7 @@ * Each build is only concerned with versions before it, as those are the ones that need to be tested * for backwards compatibility. We never look forward, and don't add forward facing version number to branches of previous * version. - * + *
<p>
* Each branch has a current version, and expected compatible versions are parsed from the server code's Version` class. * We can reliably figure out which the unreleased versions are due to the convention of always adding the next unreleased * version number to server in all branches when a version is released. diff --git a/buildSrc/src/main/java/org/opensearch/gradle/LoggingOutputStream.java b/buildSrc/src/main/java/org/opensearch/gradle/LoggingOutputStream.java index 5ae7ad1595e2f..5259700b3a63d 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/LoggingOutputStream.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/LoggingOutputStream.java @@ -38,7 +38,7 @@ /** * Writes data passed to this stream as log messages. - * + *
<p>
* The stream will be flushed whenever a newline is detected. * Allows setting an optional prefix before each line of output. */ diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/TestingConventionRule.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/TestingConventionRule.java index aa81ef75701fa..db46d2e3edc55 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/TestingConventionRule.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/TestingConventionRule.java @@ -40,7 +40,7 @@ /** * Represent rules for tests enforced by the @{link {@link TestingConventionsTasks}} - * + *
<p>
* Rules are identified by name, tests must have this name as a suffix and implement one of the base classes * and be part of all the specified tasks. */ diff --git a/buildSrc/src/main/java/org/opensearch/gradle/tar/SymbolicLinkPreservingTar.java b/buildSrc/src/main/java/org/opensearch/gradle/tar/SymbolicLinkPreservingTar.java index 1423b52c443d9..e82d8ed73ced2 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/tar/SymbolicLinkPreservingTar.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/tar/SymbolicLinkPreservingTar.java @@ -61,7 +61,7 @@ /** * A custom archive task that assembles a tar archive that preserves symbolic links. - * + *
<p>
* This task is necessary because the built-in task {@link org.gradle.api.tasks.bundling.Tar} does not preserve symbolic links. */ public class SymbolicLinkPreservingTar extends Tar { diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/rest/RestResourcesPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/test/rest/RestResourcesPlugin.java index 728e36ce98bff..fcadf35593ce6 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/test/rest/RestResourcesPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/test/rest/RestResourcesPlugin.java @@ -65,7 +65,7 @@ * Rest YAML tests :
* When the {@link RestResourcesPlugin} has been applied the {@link CopyRestTestsTask} will copy the Rest YAML tests if explicitly * configured with `includeCore` through the `restResources.restTests` extension. - * + *
<p>
* Additionally you can specify which sourceSetName resources should be copied to. The default is the yamlRestTest source set. * @see CopyRestApiTask * @see CopyRestTestsTask diff --git a/buildSrc/src/main/java/org/opensearch/gradle/vagrant/VagrantMachine.java b/buildSrc/src/main/java/org/opensearch/gradle/vagrant/VagrantMachine.java index 2d71b9361963b..7abf9bf5fbef6 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/vagrant/VagrantMachine.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/vagrant/VagrantMachine.java @@ -53,7 +53,7 @@ /** * An helper to manage a vagrant box. - * + *
<p>
* This is created alongside a {@link VagrantExtension} for a project to manage starting and * stopping a single vagrant box. */ @@ -185,7 +185,7 @@ public void setArgs(String... args) { /** * A function to translate output from the vagrant command execution to the progress line. - * + *
<p>
* The function takes the current line of output from vagrant, and returns a new * progress line, or {@code null} if there is no update. */ diff --git a/buildSrc/src/main/java/org/opensearch/gradle/vagrant/VagrantShellTask.java b/buildSrc/src/main/java/org/opensearch/gradle/vagrant/VagrantShellTask.java index 85d3e340c50e7..ca1b95183505f 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/vagrant/VagrantShellTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/vagrant/VagrantShellTask.java @@ -47,7 +47,7 @@ /** * A shell script to run within a vagrant VM. - * + *
<p>
* The script is run as root within the VM. */ public abstract class VagrantShellTask extends DefaultTask { diff --git a/buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java b/buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java index c0e7320ba7615..8e246ff9ecd11 100644 --- a/buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java +++ b/buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java @@ -58,7 +58,7 @@ public void tearDown() { * This test is used to verify that adding the 'opensearch.pluginzip' to the project * adds some other transitive plugins and tasks under the hood. This is basically * a behavioral test of the {@link Publish#apply(Project)} method. - * + *
<p>
* This is equivalent of having a build.gradle script with just the following section: *
<pre>
      *     plugins {
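The hunks that follow assert on Gradle TestKit outcomes. A minimal sketch of that pattern, under the assumption that a template project directory has been prepared first (the directory path is a placeholder, and prepareGradleRunnerFromTemplate in the test wraps similar setup):

import org.gradle.testkit.runner.BuildResult;
import org.gradle.testkit.runner.GradleRunner;
import org.gradle.testkit.runner.TaskOutcome;

import java.io.File;

public class PublishSketch {
    public static void main(String[] args) {
        // Placeholder directory: the real tests copy a *.gradle template here first.
        File projectDir = new File("build/functionalTest");

        BuildResult result = GradleRunner.create()
            .withProjectDir(projectDir)
            .withArguments("build")   // same argument style as the hunks below
            .withPluginClasspath()    // puts the plugin under test on the classpath
            .build();                 // throws if the build fails

        // Mirrors: assertEquals(SUCCESS, result.task(":build").getOutcome());
        if (result.task(":build").getOutcome() != TaskOutcome.SUCCESS) {
            throw new AssertionError("expected :build to succeed");
        }
    }
}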
@@ -202,7 +202,7 @@ public void useDefaultValues() throws IOException, URISyntaxException, XmlPullPa
         GradleRunner runner = prepareGradleRunnerFromTemplate("useDefaultValues.gradle", "build", ZIP_PUBLISH_TASK);
         BuildResult result = runner.build();
 
-        /** Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */
+        /* Check if build and ZIP_PUBLISH_TASK tasks have run well */
         assertEquals(SUCCESS, result.task(":" + "build").getOutcome());
         assertEquals(SUCCESS, result.task(":" + ZIP_PUBLISH_TASK).getOutcome());
 
@@ -277,7 +277,7 @@ public void allProjectsGroup() throws IOException, URISyntaxException, XmlPullPa
         GradleRunner runner = prepareGradleRunnerFromTemplate("allProjectsGroup.gradle", "build", ZIP_PUBLISH_TASK);
         BuildResult result = runner.build();
 
-        /** Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */
+        /* Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */
         assertEquals(SUCCESS, result.task(":" + "build").getOutcome());
         assertEquals(SUCCESS, result.task(":" + ZIP_PUBLISH_TASK).getOutcome());
 
@@ -312,7 +312,7 @@ public void groupPriorityLevel() throws IOException, URISyntaxException, XmlPull
         GradleRunner runner = prepareGradleRunnerFromTemplate("groupPriorityLevel.gradle", "build", ZIP_PUBLISH_TASK);
         BuildResult result = runner.build();
 
-        /** Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */
+        /* Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */
         assertEquals(SUCCESS, result.task(":" + "build").getOutcome());
         assertEquals(SUCCESS, result.task(":" + ZIP_PUBLISH_TASK).getOutcome());
 
@@ -348,7 +348,7 @@ public void missingPOMEntity() throws IOException, URISyntaxException, XmlPullPa
         GradleRunner runner = prepareGradleRunnerFromTemplate("missingPOMEntity.gradle", "build", ZIP_PUBLISH_TASK);
         BuildResult result = runner.build();
 
-        /** Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */
+        /* Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */
         assertEquals(SUCCESS, result.task(":" + "build").getOutcome());
         assertEquals(SUCCESS, result.task(":" + ZIP_PUBLISH_TASK).getOutcome());
 
@@ -395,7 +395,7 @@ public void customizedGroupValue() throws IOException, URISyntaxException, XmlPu
         GradleRunner runner = prepareGradleRunnerFromTemplate("customizedGroupValue.gradle", "build", ZIP_PUBLISH_TASK);
         BuildResult result = runner.build();
 
-        /** Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */
+        /* Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */
         assertEquals(SUCCESS, result.task(":" + "build").getOutcome());
         assertEquals(SUCCESS, result.task(":" + ZIP_PUBLISH_TASK).getOutcome());
 
diff --git a/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/GradleThreadsFilter.java b/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/GradleThreadsFilter.java
index b64c719440733..def5248c1f255 100644
--- a/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/GradleThreadsFilter.java
+++ b/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/GradleThreadsFilter.java
@@ -36,7 +36,7 @@
 
 /**
  * Filter out threads controlled by gradle that may be created during unit tests.
- *
+ * <p>
* Currently this includes pooled threads for Exec as well as file system event watcher threads. */ public class GradleThreadsFilter implements ThreadFilter { diff --git a/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/JUnit3MethodProvider.java b/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/JUnit3MethodProvider.java index 163a903d31832..1a2e36aa78e9f 100644 --- a/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/JUnit3MethodProvider.java +++ b/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/JUnit3MethodProvider.java @@ -43,7 +43,7 @@ /** * Backwards compatible test* method provider (public, non-static). - * + *
<p>
* copy of org.apache.lucene.util.LuceneJUnit3MethodProvider to avoid a dependency between build and test fw. */ public final class JUnit3MethodProvider implements TestMethodProvider { diff --git a/client/benchmark/src/main/java/org/opensearch/client/benchmark/metrics/SampleRecorder.java b/client/benchmark/src/main/java/org/opensearch/client/benchmark/metrics/SampleRecorder.java index e53e4f1ad692d..9cd12f5e78bd0 100644 --- a/client/benchmark/src/main/java/org/opensearch/client/benchmark/metrics/SampleRecorder.java +++ b/client/benchmark/src/main/java/org/opensearch/client/benchmark/metrics/SampleRecorder.java @@ -37,7 +37,7 @@ /** * Stores measurement samples. - * + *
<p>
* This class is NOT threadsafe. */ public final class SampleRecorder { diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/ClusterClient.java b/client/rest-high-level/src/main/java/org/opensearch/client/ClusterClient.java index 5bd5a5d0e308e..eb0a8b0e8f40a 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/ClusterClient.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/ClusterClient.java @@ -170,8 +170,8 @@ public ClusterHealthResponse health(ClusterHealthRequest healthRequest, RequestO /** * Asynchronously get cluster health using the Cluster Health API. - * * If timeout occurred, {@link ClusterHealthResponse} will have isTimedOut() == true and status() == RestStatus.REQUEST_TIMEOUT + * * @param healthRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java index 61a202d25167f..35d9929a649ff 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java @@ -451,9 +451,9 @@ static void addSearchRequestParams(Params params, SearchRequest searchRequest) { params.withIndicesOptions(searchRequest.indicesOptions()); } params.withSearchType(searchRequest.searchType().name().toLowerCase(Locale.ROOT)); - /** - * Merging search responses as part of CCS flow to reduce roundtrips is not supported for point in time - - * refer to org.opensearch.action.search.SearchResponseMerger + /* + Merging search responses as part of CCS flow to reduce roundtrips is not supported for point in time - + refer to org.opensearch.action.search.SearchResponseMerger */ if (searchRequest.pointInTimeBuilder() != null) { params.putParam("ccs_minimize_roundtrips", "false"); diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/TimedRequest.java b/client/rest-high-level/src/main/java/org/opensearch/client/TimedRequest.java index dad5b6a3679ec..d40445b2daa81 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/TimedRequest.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/TimedRequest.java @@ -37,7 +37,7 @@ /** * A base request for any requests that supply timeouts. - * + *
<p>
* Please note, any requests that use a ackTimeout should set timeout as they * represent the same backing field on the server. */ diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/indices/CreateIndexRequest.java b/client/rest-high-level/src/main/java/org/opensearch/client/indices/CreateIndexRequest.java index 7805a7853b003..62c5b54c0e75e 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/indices/CreateIndexRequest.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/indices/CreateIndexRequest.java @@ -156,7 +156,7 @@ public MediaType mappingsMediaType() { /** * Adds mapping that will be added when the index gets created. - * + *
<p>
* Note that the definition should *not* be nested under a type name. * * @param source The mapping source @@ -168,7 +168,7 @@ public CreateIndexRequest mapping(String source, MediaType mediaType) { /** * Adds mapping that will be added when the index gets created. - * + *
<p>
* Note that the definition should *not* be nested under a type name. * * @param source The mapping source @@ -179,7 +179,7 @@ public CreateIndexRequest mapping(XContentBuilder source) { /** * Adds mapping that will be added when the index gets created. - * + *
<p>
* Note that the definition should *not* be nested under a type name. * * @param source The mapping source @@ -196,7 +196,7 @@ public CreateIndexRequest mapping(Map source) { /** * Adds mapping that will be added when the index gets created. - * + *
<p>
* Note that the definition should *not* be nested under a type name. * * @param source The mapping source @@ -282,7 +282,7 @@ public CreateIndexRequest aliases(Collection aliases) { /** * Sets the settings and mappings as a single source. - * + *
<p>
* Note that the mapping definition should *not* be nested under a type name. */ public CreateIndexRequest source(String source, MediaType mediaType) { @@ -291,7 +291,7 @@ public CreateIndexRequest source(String source, MediaType mediaType) { /** * Sets the settings and mappings as a single source. - * + *
<p>
* Note that the mapping definition should *not* be nested under a type name. */ public CreateIndexRequest source(XContentBuilder source) { @@ -300,7 +300,7 @@ public CreateIndexRequest source(XContentBuilder source) { /** * Sets the settings and mappings as a single source. - * + *
<p>
* Note that the mapping definition should *not* be nested under a type name. */ public CreateIndexRequest source(BytesReference source, MediaType mediaType) { @@ -311,7 +311,7 @@ public CreateIndexRequest source(BytesReference source, MediaType mediaType) { /** * Sets the settings and mappings as a single source. - * + *
<p>
* Note that the mapping definition should *not* be nested under a type name. */ @SuppressWarnings("unchecked") diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/indices/PutMappingRequest.java b/client/rest-high-level/src/main/java/org/opensearch/client/indices/PutMappingRequest.java index 6d7e95d191ba6..a63393bd2341b 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/indices/PutMappingRequest.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/indices/PutMappingRequest.java @@ -105,7 +105,7 @@ public MediaType mediaType() { /** * The mapping source definition. - * + *
<p>
* Note that the definition should *not* be nested under a type name. */ public PutMappingRequest source(Map mappingSource) { @@ -120,7 +120,7 @@ public PutMappingRequest source(Map mappingSource) { /** * The mapping source definition. - * + *
<p>
* Note that the definition should *not* be nested under a type name. */ public PutMappingRequest source(String mappingSource, MediaType mediaType) { @@ -131,7 +131,7 @@ public PutMappingRequest source(String mappingSource, MediaType mediaType) { /** * The mapping source definition. - * + *
<p>
* Note that the definition should *not* be nested under a type name. */ public PutMappingRequest source(XContentBuilder builder) { @@ -142,7 +142,7 @@ public PutMappingRequest source(XContentBuilder builder) { /** * The mapping source definition. - * + *
<p>
* Note that the definition should *not* be nested under a type name. */ public PutMappingRequest source(BytesReference source, MediaType mediaType) { diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/tasks/TaskGroup.java b/client/rest-high-level/src/main/java/org/opensearch/client/tasks/TaskGroup.java index c419884700587..9129de717459f 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/tasks/TaskGroup.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/tasks/TaskGroup.java @@ -38,7 +38,6 @@ /** * Client side counterpart of server side version. - * * {@link org.opensearch.action.admin.cluster.node.tasks.list.TaskGroup} */ public class TaskGroup { diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/AbstractRequestTestCase.java b/client/rest-high-level/src/test/java/org/opensearch/client/AbstractRequestTestCase.java index 49bcb61b2dc3d..c464ee9ece74a 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/AbstractRequestTestCase.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/AbstractRequestTestCase.java @@ -44,7 +44,7 @@ /** * Base class for HLRC request parsing tests. - * + *
<p>
* This case class facilitates generating client side request test instances and * verifies that they are correctly parsed into server side request instances. * diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/AbstractResponseTestCase.java b/client/rest-high-level/src/test/java/org/opensearch/client/AbstractResponseTestCase.java index 27704b01560c4..7d2d6b87b85c6 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/AbstractResponseTestCase.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/AbstractResponseTestCase.java @@ -44,7 +44,7 @@ /** * Base class for HLRC response parsing tests. - * + *
<p>
* This case class facilitates generating server side response test instances and * verifies that they are correctly parsed into HLRC response instances. * diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/BulkProcessorRetryIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/BulkProcessorRetryIT.java index b7f6328b3c88e..3678cc042ba47 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/BulkProcessorRetryIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/BulkProcessorRetryIT.java @@ -180,7 +180,7 @@ private static MultiGetRequest indexDocs(BulkProcessor processor, int numDocs) { /** * Internal helper class to correlate backoff states with bulk responses. This is needed to check whether we maxed out the number * of retries but still got rejected (which is perfectly fine and can also happen from time to time under heavy load). - * + *
<p>
* This implementation relies on an implementation detail in Retry, namely that the bulk listener is notified on the same thread * as the last call to the backoff policy's iterator. The advantage is that this is non-invasive to the rest of the production code. */ diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IndicesClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IndicesClientDocumentationIT.java index abb2d75aea751..ce080b45273b4 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IndicesClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IndicesClientDocumentationIT.java @@ -137,15 +137,15 @@ * You need to wrap your code between two tags like: * // tag::example * // end::example - * + *
<p>
* Where example is your tag name. - * + *
<p>
* Then in the documentation, you can extract what is between tag and end tags with * ["source","java",subs="attributes,callouts,macros"] * -------------------------------------------------- * include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[example] * -------------------------------------------------- - * + *
<p>
* The column width of the code block is 84. If the code contains a line longer * than 84, the line will be cut and a horizontal scroll bar will be displayed. * (the code indentation of the tag is not included in the width) diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IngestClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IngestClientDocumentationIT.java index d14759065b5eb..28909cf58541a 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IngestClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IngestClientDocumentationIT.java @@ -65,15 +65,15 @@ * You need to wrap your code between two tags like: * // tag::example * // end::example - * + *
<p>
* Where example is your tag name. - * + *
<p>
* Then in the documentation, you can extract what is between tag and end tags with * ["source","java",subs="attributes,callouts,macros"] * -------------------------------------------------- * include-tagged::{doc-tests}/IngestClientDocumentationIT.java[example] * -------------------------------------------------- - * + *
<p>
* The column width of the code block is 84. If the code contains a line longer * than 84, the line will be cut and a horizontal scroll bar will be displayed. * (the code indentation of the tag is not included in the width) diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/SnapshotClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/SnapshotClientDocumentationIT.java index 50bcf79642eac..d0015db044843 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/SnapshotClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/SnapshotClientDocumentationIT.java @@ -90,15 +90,15 @@ * You need to wrap your code between two tags like: * // tag::example * // end::example - * + *
<p>
* Where example is your tag name. - * + *
<p>
* Then in the documentation, you can extract what is between tag and end tags with * ["source","java",subs="attributes,callouts,macros"] * -------------------------------------------------- * include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[example] * -------------------------------------------------- - * + *
<p>
* The column width of the code block is 84. If the code contains a line longer * than 84, the line will be cut and a horizontal scroll bar will be displayed. * (the code indentation of the tag is not included in the width) diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/StoredScriptsDocumentationIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/StoredScriptsDocumentationIT.java index 6916ae11556e2..2e2d15df5392a 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/StoredScriptsDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/StoredScriptsDocumentationIT.java @@ -66,15 +66,15 @@ * You need to wrap your code between two tags like: * // tag::example * // end::example - * + *
<p>
* Where example is your tag name. - * + *
<p>
* Then in the documentation, you can extract what is between tag and end tags with * ["source","java",subs="attributes,callouts,macros"] * -------------------------------------------------- * include-tagged::{doc-tests}/StoredScriptsDocumentationIT.java[example] * -------------------------------------------------- - * + *
<p>
* The column width of the code block is 84. If the code contains a line longer * than 84, the line will be cut and a horizontal scroll bar will be displayed. * (the code indentation of the tag is not included in the width) diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/TasksClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/TasksClientDocumentationIT.java index 03e267aafd1b7..cbac0b8c97d9c 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/TasksClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/TasksClientDocumentationIT.java @@ -66,15 +66,15 @@ * You need to wrap your code between two tags like: * // tag::example * // end::example - * + *
<p>
* Where example is your tag name. - * + *
<p>
* Then in the documentation, you can extract what is between tag and end tags with * ["source","java",subs="attributes,callouts,macros"] * -------------------------------------------------- * include-tagged::{doc-tests}/{@link TasksClientDocumentationIT}.java[example] * -------------------------------------------------- - * + *
<p>
* The column width of the code block is 84. If the code contains a line longer * than 84, the line will be cut and a horizontal scroll bar will be displayed. * (the code indentation of the tag is not included in the width) diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/indices/RandomCreateIndexGenerator.java b/client/rest-high-level/src/test/java/org/opensearch/client/indices/RandomCreateIndexGenerator.java index 1f747dc139d15..edb4d16c6d992 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/indices/RandomCreateIndexGenerator.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/indices/RandomCreateIndexGenerator.java @@ -44,7 +44,7 @@ public class RandomCreateIndexGenerator { /** * Returns a random {@link CreateIndexRequest}. - * + *
<p>
* Randomizes the index name, the aliases, mappings and settings associated with the * index. When present, the mappings make no mention of types. */ diff --git a/client/rest/src/main/java/org/opensearch/client/RestClient.java b/client/rest/src/main/java/org/opensearch/client/RestClient.java index e819fa27a8939..7691c01daefea 100644 --- a/client/rest/src/main/java/org/opensearch/client/RestClient.java +++ b/client/rest/src/main/java/org/opensearch/client/RestClient.java @@ -310,7 +310,7 @@ public boolean isRunning() { * they will be retried). In case of failures all of the alive nodes (or * dead nodes that deserve a retry) are retried until one responds or none * of them does, in which case an {@link IOException} will be thrown. - * + *
<p>
* This method works by performing an asynchronous call and waiting * for the result. If the asynchronous call throws an exception we wrap * it and rethrow it so that the stack trace attached to the exception diff --git a/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java b/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java index b2807d35d230e..42c31864e0578 100644 --- a/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java +++ b/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java @@ -89,15 +89,15 @@ * You need to wrap your code between two tags like: * // tag::example[] * // end::example[] - * + *
<p>
* Where example is your tag name. - * + *
<p>
* Then in the documentation, you can extract what is between tag and end tags with * ["source","java",subs="attributes,callouts,macros"] * -------------------------------------------------- * include-tagged::{doc-tests}/RestClientDocumentation.java[example] * -------------------------------------------------- - * + *
<p>
* Note that this is not a test class as we are only interested in testing that docs snippets compile. We don't want * to send requests to a node and we don't even have the tools to do it. */ diff --git a/client/sniffer/src/test/java/org/opensearch/client/sniff/documentation/SnifferDocumentation.java b/client/sniffer/src/test/java/org/opensearch/client/sniff/documentation/SnifferDocumentation.java index 440e9a2ea5cd1..8a4ca1fb0a136 100644 --- a/client/sniffer/src/test/java/org/opensearch/client/sniff/documentation/SnifferDocumentation.java +++ b/client/sniffer/src/test/java/org/opensearch/client/sniff/documentation/SnifferDocumentation.java @@ -49,15 +49,15 @@ * You need to wrap your code between two tags like: * // tag::example[] * // end::example[] - * + *
<p>
* Where example is your tag name. - * + *
<p>
* Then in the documentation, you can extract what is between tag and end tags with * ["source","java",subs="attributes,callouts,macros"] * -------------------------------------------------- * include-tagged::{doc-tests}/SnifferDocumentation.java[example] * -------------------------------------------------- - * + *
<p>
* Note that this is not a test class as we are only interested in testing that docs snippets compile. We don't want * to send requests to a node and we don't even have the tools to do it. */ diff --git a/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/SystemJvmOptions.java b/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/SystemJvmOptions.java index aa3dfbe39ee96..726c381db09f6 100644 --- a/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/SystemJvmOptions.java +++ b/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/SystemJvmOptions.java @@ -101,15 +101,15 @@ private static String maybeShowCodeDetailsInExceptionMessages() { } private static String javaLocaleProviders() { - /** - * SPI setting is used to allow loading custom CalendarDataProvider - * in jdk8 it has to be loaded from jre/lib/ext, - * in jdk9+ it is already within ES project and on a classpath - * - * Due to internationalization enhancements in JDK 9 OpenSearch need to set the provider to COMPAT otherwise time/date - * parsing will break in an incompatible way for some date patterns and locales. - * //TODO COMPAT will be deprecated in at some point, see please https://bugs.openjdk.java.net/browse/JDK-8232906 - * See also: documentation in server/org.opensearch.common.time.IsoCalendarDataProvider + /* + SPI setting is used to allow loading custom CalendarDataProvider + in jdk8 it has to be loaded from jre/lib/ext, + in jdk9+ it is already within ES project and on a classpath + + Due to internationalization enhancements in JDK 9 OpenSearch need to set the provider to COMPAT otherwise time/date + parsing will break in an incompatible way for some date patterns and locales. + //TODO COMPAT will be deprecated in at some point, see please https://bugs.openjdk.java.net/browse/JDK-8232906 + See also: documentation in server/org.opensearch.common.time.IsoCalendarDataProvider */ return "-Djava.locale.providers=SPI,COMPAT"; } diff --git a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/InstallPluginCommand.java b/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/InstallPluginCommand.java index 66f43b1e30d28..838d6e22a37bd 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/InstallPluginCommand.java +++ b/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/InstallPluginCommand.java @@ -107,7 +107,7 @@ /** * A command for the plugin cli to install a plugin into opensearch. - * + *

* The install command takes a plugin id, which may be any of the following: *

    * <ul>
  * <li>An official opensearch plugin name</li>
  @@ -411,7 +411,7 @@ private String getMavenUrl(Terminal terminal, String[] coordinates, String platf /** * Returns {@code true} if the given url exists, and {@code false} otherwise. - * + * <p>

    * The given url must be {@code https} and existing means a {@code HEAD} request returns 200. */ // pkg private for tests to manipulate @@ -698,7 +698,6 @@ InputStream getPublicKey() { /** * Creates a URL and opens a connection. - * * If the URL returns a 404, {@code null} is returned, otherwise the open URL opject is returned. */ // pkg private for tests diff --git a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/ProgressInputStream.java b/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/ProgressInputStream.java index 579f676631a5a..02be3dbc82a44 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/ProgressInputStream.java +++ b/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/ProgressInputStream.java @@ -41,7 +41,7 @@ * The listener is triggered whenever a full percent is increased * The listener is never triggered twice on the same percentage * The listener will always return 99 percent, if the expectedTotalSize is exceeded, until it is finished - * + *

* Only used by the InstallPluginCommand, thus package private here */ abstract class ProgressInputStream extends FilterInputStream { diff --git a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/UpgradeTask.java index b7dcbd50cf781..708f644bcdeb6 100644 --- a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/UpgradeTask.java +++ b/distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/UpgradeTask.java @@ -17,7 +17,7 @@ * An interface for an upgrade task, which in this instance is an unit of * operation that is part of the overall upgrade process. This extends the * {@link java.util.function.Consumer} interface. - * + * <p>

* The implementing tasks consume and instance of a tuple of {@link TaskInput} * and {@link Terminal} and operate via side effects. * diff --git a/doc-tools/missing-doclet/src/main/java/org/opensearch/missingdoclet/MissingDoclet.java index e6122e7baf91a..e1ad55fe4b60b 100644 --- a/doc-tools/missing-doclet/src/main/java/org/opensearch/missingdoclet/MissingDoclet.java +++ b/doc-tools/missing-doclet/src/main/java/org/opensearch/missingdoclet/MissingDoclet.java @@ -45,7 +45,7 @@ * It isn't recursive, just ignores exactly the elements you tell it. * Has option --missing-method to apply "method" level to selected packages (fix one at a time). * Matches package names exactly: so you'll need to list subpackages separately. - * + * <p>

* Note: This by default ignores javadoc validation on overridden methods. */ // Original version of this class is ported from MissingDoclet code in Lucene, diff --git a/libs/cli/src/main/java/org/opensearch/cli/Command.java index eed5c4ba4ee6f..cc9230bdb2282 100644 --- a/libs/cli/src/main/java/org/opensearch/cli/Command.java +++ b/libs/cli/src/main/java/org/opensearch/cli/Command.java @@ -162,7 +162,7 @@ protected static void exit(int status) { /** * Executes this command. - * + * <p>

* Any runtime user errors (like an input file that does not exist), should throw a {@link UserException}. */ protected abstract void execute(Terminal terminal, OptionSet options) throws Exception; diff --git a/libs/cli/src/main/java/org/opensearch/cli/Terminal.java index be030c18507ad..fb1097178e5a3 100644 --- a/libs/cli/src/main/java/org/opensearch/cli/Terminal.java +++ b/libs/cli/src/main/java/org/opensearch/cli/Terminal.java @@ -44,7 +44,7 @@ /** * A Terminal wraps access to reading input and writing output for a cli. - * + * <p>

    * The available methods are similar to those of {@link Console}, with the ability * to read either normal text or a password, and the ability to print a line * of text. Printing is also gated by the {@link Verbosity} of the terminal, diff --git a/libs/common/src/main/java/org/opensearch/bootstrap/JarHell.java b/libs/common/src/main/java/org/opensearch/bootstrap/JarHell.java index c4ba778e7db86..fc5e364241d12 100644 --- a/libs/common/src/main/java/org/opensearch/bootstrap/JarHell.java +++ b/libs/common/src/main/java/org/opensearch/bootstrap/JarHell.java @@ -104,7 +104,7 @@ public static void checkJarHell(Consumer output) throws IOException, URI /** * Parses the classpath into an array of URLs - * @return array of URLs + * @return collection of URLs * @throws IllegalStateException if the classpath contains empty elements */ public static Set parseClassPath() { @@ -114,7 +114,7 @@ public static Set parseClassPath() { /** * Parses the classpath into a set of URLs. For testing. * @param classPath classpath to parse (typically the system property {@code java.class.path}) - * @return array of URLs + * @return collection of URLs * @throws IllegalStateException if the classpath contains empty elements */ @SuppressForbidden(reason = "resolves against CWD because that is how classpaths work") diff --git a/libs/common/src/main/java/org/opensearch/common/Explicit.java b/libs/common/src/main/java/org/opensearch/common/Explicit.java index 66e079c461e75..e060baf6f187e 100644 --- a/libs/common/src/main/java/org/opensearch/common/Explicit.java +++ b/libs/common/src/main/java/org/opensearch/common/Explicit.java @@ -38,7 +38,7 @@ * Holds a value that is either: * a) set implicitly e.g. through some default value * b) set explicitly e.g. from a user selection - * + *

    * When merging conflicting configuration settings such as * field mapping settings it is preferable to preserve an explicit * choice rather than a choice made only made implicitly by defaults. diff --git a/libs/common/src/main/java/org/opensearch/common/LocalTimeOffset.java b/libs/common/src/main/java/org/opensearch/common/LocalTimeOffset.java index 7e89641927ed5..eb7b331c9aa24 100644 --- a/libs/common/src/main/java/org/opensearch/common/LocalTimeOffset.java +++ b/libs/common/src/main/java/org/opensearch/common/LocalTimeOffset.java @@ -514,7 +514,7 @@ public boolean anyMoveBackToPreviousDay() { * Builds an array that can be {@link Arrays#binarySearch(long[], long)}ed * for the daylight savings time transitions. * - * @openearch.internal + * @opensearch.internal */ private static class TransitionArrayLookup extends AbstractManyTransitionsLookup { private final LocalTimeOffset[] offsets; diff --git a/libs/common/src/main/java/org/opensearch/common/SetOnce.java b/libs/common/src/main/java/org/opensearch/common/SetOnce.java index a596b5fcdb61d..778926ce108b7 100644 --- a/libs/common/src/main/java/org/opensearch/common/SetOnce.java +++ b/libs/common/src/main/java/org/opensearch/common/SetOnce.java @@ -35,7 +35,7 @@ * A convenient class which offers a semi-immutable object wrapper implementation which allows one * to set the value of an object exactly once, and retrieve it many times. If {@link #set(Object)} * is called more than once, {@link AlreadySetException} is thrown and the operation will fail. - * + *

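The merge preference described in the Explicit javadoc above can be sketched as follows; the field values are illustrative, and value()/explicit() are the accessor pair on Explicit:

    Explicit<Boolean> fromDefaults = new Explicit<>(true, false); // value, explicit flag
    Explicit<Boolean> fromUser = new Explicit<>(false, true);
    // an explicit user choice wins over an implicit default when merging
    Explicit<Boolean> merged = fromUser.explicit() ? fromUser : fromDefaults;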
    * This is borrowed from lucene's experimental API. It is not reused to eliminate the dependency * on lucene core for such a simple (standalone) utility class that may change beyond OpenSearch needs. * diff --git a/libs/common/src/main/java/org/opensearch/common/collect/Tuple.java b/libs/common/src/main/java/org/opensearch/common/collect/Tuple.java index d0b94536b0729..5c0e3f2de7708 100644 --- a/libs/common/src/main/java/org/opensearch/common/collect/Tuple.java +++ b/libs/common/src/main/java/org/opensearch/common/collect/Tuple.java @@ -34,7 +34,6 @@ /** * Java 9 Tuple - * * todo: deprecate and remove w/ min jdk upgrade to 11? * * @opensearch.internal diff --git a/libs/common/src/main/java/org/opensearch/common/io/PathUtils.java b/libs/common/src/main/java/org/opensearch/common/io/PathUtils.java index b3526859933ec..ed8d50892b74a 100644 --- a/libs/common/src/main/java/org/opensearch/common/io/PathUtils.java +++ b/libs/common/src/main/java/org/opensearch/common/io/PathUtils.java @@ -93,7 +93,7 @@ public static Path get(URI uri) { /** * Tries to resolve the given path against the list of available roots. - * + *

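The set-once contract described above, as a minimal sketch:

    SetOnce<String> ref = new SetOnce<>();
    ref.set("first");        // ok
    String v = ref.get();    // "first"
    ref.set("second");       // throws SetOnce.AlreadySetException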
* If path starts with one of the listed roots, it returned back by this method, otherwise null is returned. */ public static Path get(Path[] roots, String path) { @@ -109,7 +109,7 @@ public static Path get(Path[] roots, String path) { /** * Tries to resolve the given file uri against the list of available roots. - * + * <p>

* If uri starts with one of the listed roots, it returned back by this method, otherwise null is returned. */ public static Path get(Path[] roots, URI uri) { diff --git a/libs/common/src/main/java/org/opensearch/common/network/InetAddresses.java index a4fbc6cb65b0d..0f289c09bbae2 100644 --- a/libs/common/src/main/java/org/opensearch/common/network/InetAddresses.java +++ b/libs/common/src/main/java/org/opensearch/common/network/InetAddresses.java @@ -368,7 +368,7 @@ public static InetAddress forString(String ipString) { /** * Convert a byte array into an InetAddress. - * + * <p>

* {@link InetAddress#getByAddress} is documented as throwing a checked * exception "if IP address is of illegal length." We replace it with * an unchecked exception, for use by callers who already know that addr @@ -423,7 +423,7 @@ public static Tuple parseCidr(String maskedAddress) { /** * Given an address and prefix length, returns the string representation of the range in CIDR notation. - * + * <p>

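A short sketch of the CIDR helper just described, together with forString (addresses are illustrative):

    InetAddress addr = InetAddresses.forString("192.168.0.0"); // unchecked, unlike InetAddress.getByName
    String cidr = InetAddresses.toCidrString(addr, 24);        // "192.168.0.0/24"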
* See {@link #toAddrString} for details on how the address is represented. */ public static String toCidrString(InetAddress address, int prefixLength) { diff --git a/libs/common/src/main/java/org/opensearch/common/unit/TimeValue.java index a3fcffb1d6a4c..30ed5bf63a748 100644 --- a/libs/common/src/main/java/org/opensearch/common/unit/TimeValue.java +++ b/libs/common/src/main/java/org/opensearch/common/unit/TimeValue.java @@ -224,10 +224,10 @@ public double getDaysFrac() { /** * Returns a {@link String} representation of the current {@link TimeValue}. - * + * <p>

* Note that this method might produce fractional time values (ex 1.6m) which cannot be * parsed by method like {@link TimeValue#parse(String, String, String, String)}. - * + * <p>

* Also note that the maximum string value that will be generated is * {@code 106751.9d} due to the way that values are internally converted * to nanoseconds (106751.9 days is Long.MAX_VALUE nanoseconds) @@ -239,12 +239,12 @@ public String toString() { /** * Returns a {@link String} representation of the current {@link TimeValue}. - * + * <p>

* Note that this method might produce fractional time values (ex 1.6m) which cannot be * parsed by method like {@link TimeValue#parse(String, String, String, String)}. The number of * fractional decimals (up to 10 maximum) are truncated to the number of fraction pieces * specified. - * + * <p>

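The fractional output noted above can be reproduced with a sketch like this; 96,000 ms renders as 1.6 minutes, which parse cannot read back:

    TimeValue value = TimeValue.timeValueMillis(96000);
    String text = value.toString(); // "1.6m" - not round-trippable through TimeValue.parse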
    * Also note that the maximum string value that will be generated is * {@code 106751.9d} due to the way that values are internally converted * to nanoseconds (106751.9 days is Long.MAX_VALUE nanoseconds) diff --git a/libs/common/src/main/java/org/opensearch/common/util/BitMixer.java b/libs/common/src/main/java/org/opensearch/common/util/BitMixer.java index 8762217916c7a..d6ea4fa359df3 100644 --- a/libs/common/src/main/java/org/opensearch/common/util/BitMixer.java +++ b/libs/common/src/main/java/org/opensearch/common/util/BitMixer.java @@ -25,9 +25,9 @@ /** * Bit mixing utilities from carrotsearch.hppc. - * + *

* Licensed under ALv2. This is pulled in directly to avoid a full hppc dependency. - * + * <p>

* The purpose of these methods is to evenly distribute key space over int32 * range. */ @@ -111,7 +111,7 @@ public static int mix32(int k) { /** * Computes David Stafford variant 9 of 64bit mix function (MH3 finalization step, * with different shifts and constants). - * + * <p>

    * Variant 9 is picked because it contains two 32-bit shifts which could be possibly * optimized into better machine code. * diff --git a/libs/core/src/main/java/org/opensearch/Build.java b/libs/core/src/main/java/org/opensearch/Build.java index 67a50a8a31a0e..b5d67f5501725 100644 --- a/libs/core/src/main/java/org/opensearch/Build.java +++ b/libs/core/src/main/java/org/opensearch/Build.java @@ -216,7 +216,7 @@ public String getDistribution() { /** * Get the version as considered at build time - * + *

    * Offers a way to get the fully qualified version as configured by the build. * This will be the same as {@link Version} for production releases, but may include on of the qualifier ( e.x alpha1 ) * or -SNAPSHOT for others. diff --git a/libs/core/src/main/java/org/opensearch/LegacyESVersion.java b/libs/core/src/main/java/org/opensearch/LegacyESVersion.java index 32eae654cf975..5d8e067a8fd8b 100644 --- a/libs/core/src/main/java/org/opensearch/LegacyESVersion.java +++ b/libs/core/src/main/java/org/opensearch/LegacyESVersion.java @@ -40,7 +40,7 @@ /** * The Contents of this file were originally moved from {@link Version}. - * + *

    * This class keeps all the supported OpenSearch predecessor versions for * backward compatibility purpose. * diff --git a/libs/core/src/main/java/org/opensearch/OpenSearchException.java b/libs/core/src/main/java/org/opensearch/OpenSearchException.java index 5bad711a15032..cce86b452f698 100644 --- a/libs/core/src/main/java/org/opensearch/OpenSearchException.java +++ b/libs/core/src/main/java/org/opensearch/OpenSearchException.java @@ -168,7 +168,7 @@ public OpenSearchException(Throwable cause) { /** * Construct a OpenSearchException with the specified detail message. - * + *

* The message can be parameterized using {} as placeholders for the given * arguments * @@ -182,7 +182,7 @@ public OpenSearchException(String msg, Object... args) { /** * Construct a OpenSearchException with the specified detail message * and nested exception. - * + * <p>

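The {} placeholder convention works like logger-style message formatting; a minimal sketch (indexName and shardId are illustrative locals):

    throw new OpenSearchException("failed to execute [{}] on shard [{}]", indexName, shardId);
    // renders as: failed to execute [my-index] on shard [0]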
    * The message can be parameterized using {} as placeholders for the given * arguments * @@ -587,7 +587,7 @@ public static OpenSearchException innerFromXContent(XContentParser parser, boole * Static toXContent helper method that renders {@link OpenSearchException} or {@link Throwable} instances * as XContent, delegating the rendering to {@link OpenSearchException#toXContent(XContentBuilder, ToXContent.Params)} * or {@link #innerToXContent(XContentBuilder, ToXContent.Params, Throwable, String, String, Map, Map, Throwable)}. - * + *

    * This method is usually used when the {@link Throwable} is rendered as a part of another XContent object, and its result can * be parsed back using the {@code OpenSearchException.fromXContent(XContentParser)} method. */ @@ -606,7 +606,7 @@ public static void generateThrowableXContent(XContentBuilder builder, ToXContent * depends on the value of the "detailed" parameter: when it's false only a simple message based on the type and message of the * exception is rendered. When it's true all detail are provided including guesses root causes, cause and potentially stack * trace. - * + *

    * This method is usually used when the {@link Exception} is rendered as a full XContent object, and its output can be parsed * by the {@code #OpenSearchException.failureFromXContent(XContentParser)} method. */ diff --git a/libs/core/src/main/java/org/opensearch/core/action/ActionListener.java b/libs/core/src/main/java/org/opensearch/core/action/ActionListener.java index 119e56cfe0bf2..4fd55898a2cb5 100644 --- a/libs/core/src/main/java/org/opensearch/core/action/ActionListener.java +++ b/libs/core/src/main/java/org/opensearch/core/action/ActionListener.java @@ -154,9 +154,9 @@ static ActionListener wrap(Runnable runnable) { /** * Creates a listener that wraps another listener, mapping response values via the given mapping function and passing along * exceptions to the delegate. - * + *

* Notice that it is considered a bug if the listener's onResponse or onFailure fails. onResponse failures will not call onFailure. - * + * <p>

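A sketch of the mapping behavior described above (the printing is illustrative):

    ActionListener<String> delegate = ActionListener.wrap(
        response -> System.out.println("got " + response),
        e -> e.printStackTrace());
    ActionListener<Integer> mapped = ActionListener.map(delegate, i -> "value-" + i);
    mapped.onResponse(42); // delegate sees "value-42"; if the mapping function threw, delegate.onFailure would be called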
* If the function fails, the listener's onFailure handler will be called. The principle is that the mapped listener will handle * exceptions from the mapping function {@code fn} but it is the responsibility of {@code delegate} to handle its own exceptions * inside `onResponse` and `onFailure`. @@ -334,7 +334,7 @@ protected void innerOnFailure(Exception e) { /** * Completes the given listener with the result from the provided supplier accordingly. * This method is mainly used to complete a listener with a block of synchronous code. - * + * <p>

    * If the supplier fails, the listener's onFailure handler will be called. * It is the responsibility of {@code delegate} to handle its own exceptions inside `onResponse` and `onFailure`. */ diff --git a/libs/core/src/main/java/org/opensearch/core/common/Strings.java b/libs/core/src/main/java/org/opensearch/core/common/Strings.java index 6227716af9cc9..8fdec670bd9f2 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/Strings.java +++ b/libs/core/src/main/java/org/opensearch/core/common/Strings.java @@ -38,7 +38,7 @@ /** * String utility class. - * + *

    * TODO replace Strings in :server * * @opensearch.internal diff --git a/libs/core/src/main/java/org/opensearch/core/common/bytes/CompositeBytesReference.java b/libs/core/src/main/java/org/opensearch/core/common/bytes/CompositeBytesReference.java index 53915a3da824c..1a48abee2dbf8 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/bytes/CompositeBytesReference.java +++ b/libs/core/src/main/java/org/opensearch/core/common/bytes/CompositeBytesReference.java @@ -45,7 +45,7 @@ /** * A composite {@link BytesReference} that allows joining multiple bytes references * into one without copying. - * + *

    * Note, {@link #toBytesRef()} will materialize all pages in this BytesReference. * * @opensearch.internal diff --git a/libs/core/src/main/java/org/opensearch/core/common/io/stream/BytesStreamInput.java b/libs/core/src/main/java/org/opensearch/core/common/io/stream/BytesStreamInput.java index a50d1c165ed72..30c84708728ef 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/io/stream/BytesStreamInput.java +++ b/libs/core/src/main/java/org/opensearch/core/common/io/stream/BytesStreamInput.java @@ -17,7 +17,7 @@ * {@link StreamInput} version of Lucene's {@link org.apache.lucene.store.ByteArrayDataInput} * This is used as a replacement of Lucene ByteArrayDataInput for abstracting byte order changes * in Lucene's API - * + *

    * Attribution given to apache lucene project under ALv2: * * Licensed to the Apache Software Foundation (ASF) under one or more diff --git a/libs/core/src/main/java/org/opensearch/core/common/io/stream/NamedWriteableRegistry.java b/libs/core/src/main/java/org/opensearch/core/common/io/stream/NamedWriteableRegistry.java index abac76c8b6c27..123b52eb92876 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/io/stream/NamedWriteableRegistry.java +++ b/libs/core/src/main/java/org/opensearch/core/common/io/stream/NamedWriteableRegistry.java @@ -43,7 +43,7 @@ /** * A registry for {@link Writeable.Reader} readers of {@link NamedWriteable}. - * + *

    * The registration is keyed by the combination of the category class of {@link NamedWriteable}, and a name unique * to that category. * diff --git a/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamInput.java b/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamInput.java index ece2012302919..3e996bdee83a2 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamInput.java +++ b/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamInput.java @@ -96,7 +96,7 @@ /** * A stream from this node to another node. Technically, it can also be streamed to a byte array but that is mostly for testing. - * + *

    * This class's methods are optimized so you can put the methods that read and write a class next to each other and you can scan them * visually for differences. That means that most variables should be read and written in a single line so even large objects fit both * reading and writing on the screen. It also means that the methods on this class are named very similarly to {@link StreamOutput}. Finally @@ -1128,7 +1128,7 @@ public C readNamedWriteable(@SuppressWarnings("unused * the corresponding entry in the registry by name, so that the proper object can be read and returned. * Default implementation throws {@link UnsupportedOperationException} as StreamInput doesn't hold a registry. * Use {@link FilterInputStream} instead which wraps a stream and supports a {@link NamedWriteableRegistry} too. - * + *

    * Prefer {@link StreamInput#readNamedWriteable(Class)} and {@link StreamOutput#writeNamedWriteable(NamedWriteable)} unless you * have a compelling reason to use this method instead. */ diff --git a/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamOutput.java b/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamOutput.java index 94b813246bc7e..2d69e1c686df3 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamOutput.java +++ b/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamOutput.java @@ -88,7 +88,7 @@ /** * A stream from another node to this node. Technically, it can also be streamed from a byte array but that is mostly for testing. - * + *

    * This class's methods are optimized so you can put the methods that read and write a class next to each other and you can scan them * visually for differences. That means that most variables should be read and written in a single line so even large objects fit both * reading and writing on the screen. It also means that the methods on this class are named very similarly to {@link StreamInput}. Finally diff --git a/libs/core/src/main/java/org/opensearch/core/common/settings/SecureString.java b/libs/core/src/main/java/org/opensearch/core/common/settings/SecureString.java index 322300a554284..45ee72f558724 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/settings/SecureString.java +++ b/libs/core/src/main/java/org/opensearch/core/common/settings/SecureString.java @@ -50,7 +50,7 @@ public final class SecureString implements CharSequence, Closeable { /** * Constructs a new SecureString which controls the passed in char array. - * + *

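The read/write pairing convention described above, sketched for a hypothetical Writeable (MyMetadata and its fields are illustrative):

    MyMetadata(StreamInput in) throws IOException {
        this.name = in.readString();
        this.version = in.readVInt();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(name);   // mirrors readString above
        out.writeVInt(version);  // mirrors readVInt above
    }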
* Note: When this instance is closed, the array will be zeroed out. */ public SecureString(char[] chars) { @@ -59,7 +59,7 @@ public SecureString(char[] chars) { /** * Constructs a new SecureString from an existing String. - * + * <p>

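A minimal sketch of the zero-on-close behavior (authenticate is a hypothetical consumer):

    try (SecureString password = new SecureString(terminal.readSecret("password: "))) {
        client.authenticate(password); // hypothetical consumer of the secret
    } // close() zeroes the wrapped char[]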
    * NOTE: This is not actually secure, since the provided String cannot be deallocated, but * this constructor allows for easy compatibility between new and old apis. * diff --git a/libs/core/src/main/java/org/opensearch/core/common/unit/ByteSizeUnit.java b/libs/core/src/main/java/org/opensearch/core/common/unit/ByteSizeUnit.java index c15db75d06d49..49eadbbb2bc00 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/unit/ByteSizeUnit.java +++ b/libs/core/src/main/java/org/opensearch/core/common/unit/ByteSizeUnit.java @@ -45,7 +45,7 @@ * A {@code SizeUnit} does not maintain size information, but only * helps organize and use size representations that may be maintained * separately across various contexts. - * + *

    * It use conventional data storage values (base-2) : *

      * <ul>
    * <li>1KB = 1024 bytes</li>
    diff --git a/libs/core/src/main/java/org/opensearch/core/compress/Compressor.java index 27d5b5dfdfa15..5324ea6151e51 100644 --- a/libs/core/src/main/java/org/opensearch/core/compress/Compressor.java +++ b/libs/core/src/main/java/org/opensearch/core/compress/Compressor.java @@ -43,7 +43,7 @@ /** * Compressor interface used for compressing {@link org.opensearch.core.xcontent.MediaType} and * {@code org.opensearch.repositories.blobstore.BlobStoreRepository} implementations. - * + * <p>

      * This is not to be confused with {@link org.apache.lucene.codecs.compressing.Compressor} which is used * for codec implementations such as {@code org.opensearch.index.codec.customcodecs.Lucene95CustomCodec} * for compressing {@link org.apache.lucene.document.StoredField}s diff --git a/libs/core/src/main/java/org/opensearch/core/compress/CompressorRegistry.java b/libs/core/src/main/java/org/opensearch/core/compress/CompressorRegistry.java index 9290254c30d8d..af09a7aebba79 100644 --- a/libs/core/src/main/java/org/opensearch/core/compress/CompressorRegistry.java +++ b/libs/core/src/main/java/org/opensearch/core/compress/CompressorRegistry.java @@ -23,7 +23,7 @@ /** * A registry that wraps a static Map singleton which holds a mapping of unique String names (typically the * compressor header as a string) to registerd {@link Compressor} implementations. - * + *

* This enables plugins, modules, extensions to register their own compression implementations through SPI * * @opensearch.experimental @@ -105,7 +105,7 @@ public static Compressor getCompressor(final String name) { /** * Returns the registered compressors as an Immutable collection - * + * <p>

      * note: used for testing */ public static Map registeredCompressors() { diff --git a/libs/core/src/main/java/org/opensearch/core/compress/spi/CompressorProvider.java b/libs/core/src/main/java/org/opensearch/core/compress/spi/CompressorProvider.java index 019e282444d64..9b806618fe0a0 100644 --- a/libs/core/src/main/java/org/opensearch/core/compress/spi/CompressorProvider.java +++ b/libs/core/src/main/java/org/opensearch/core/compress/spi/CompressorProvider.java @@ -18,7 +18,7 @@ /** * Service Provider Interface for plugins, modules, extensions providing custom * compression algorithms - * + *

      * see {@link Compressor} for implementing methods * and {@link org.opensearch.core.compress.CompressorRegistry} for the registration of custom * Compressors diff --git a/libs/core/src/main/java/org/opensearch/core/index/Index.java b/libs/core/src/main/java/org/opensearch/core/index/Index.java index b5ee482bcd952..a927179114188 100644 --- a/libs/core/src/main/java/org/opensearch/core/index/Index.java +++ b/libs/core/src/main/java/org/opensearch/core/index/Index.java @@ -48,7 +48,7 @@ /** * A value class representing the basic required properties of an OpenSearch index. - * + *

* (This class is immutable.) * * @opensearch.api @@ -116,7 +116,7 @@ public String getUUID() { /** * Returns either the name and unique identifier of the index * or only the name if the uuid is {@link Strings#UNKNOWN_UUID_VALUE}. - * + * <p>

      * If we have a uuid we put it in the toString so it'll show up in logs * which is useful as more and more things use the uuid rather * than the name as the lookup key for the index. diff --git a/libs/core/src/main/java/org/opensearch/core/index/shard/ShardId.java b/libs/core/src/main/java/org/opensearch/core/index/shard/ShardId.java index 984434190b486..c0abad7ed727f 100644 --- a/libs/core/src/main/java/org/opensearch/core/index/shard/ShardId.java +++ b/libs/core/src/main/java/org/opensearch/core/index/shard/ShardId.java @@ -143,7 +143,7 @@ public String toString() { /** * Parse the string representation of this shardId back to an object. - * + *

      * We lose index uuid information here, but since we use toString in * rest responses, this is the best we can do to reconstruct the object * on the client side. diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/AbstractObjectParser.java b/libs/core/src/main/java/org/opensearch/core/xcontent/AbstractObjectParser.java index a0e2a54fce91c..79e531a542026 100644 --- a/libs/core/src/main/java/org/opensearch/core/xcontent/AbstractObjectParser.java +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/AbstractObjectParser.java @@ -108,7 +108,7 @@ public abstract void declareNamedObject( * * Unlike the other version of this method, "ordered" mode (arrays of * objects) is not supported. - * + *

* See NamedObjectHolder in ObjectParserTests for examples of how to invoke * this. * @@ -163,7 +163,7 @@ public abstract void declareNamedObjects( * the order sent but tools that generate json are free to put object * members in an unordered Map, jumbling them. Thus, if you care about order * you can send the object in the second way. - * + * <p>

* See NamedObjectHolder in ObjectParserTests for examples of how to invoke * this. * @@ -366,10 +366,10 @@ public void declareFieldArray( /** * Declares a set of fields that are required for parsing to succeed. Only one of the values * provided per String[] must be matched. - * + * <p>

* E.g. declareRequiredFieldSet("foo", "bar"); means at least one of "foo" or * "bar" fields must be present. If neither of those fields are present, an exception will be thrown. - * + * <p>

      * Multiple required sets can be configured: * *

      
@@ -379,7 +379,7 @@ public <T> void declareFieldArray(
            *
            * requires that one of "foo" or "bar" fields are present, and also that one of "bizz" or
            * "buzz" fields are present.
      -     *
+     * <p>

      * In JSON, it means any of these combinations are acceptable: * *

@@ -415,12 +415,12 @@ public void declareFieldArray( /** * Declares a set of fields of which at most one must appear for parsing to succeed - * + * <p>

* E.g. declareExclusiveFieldSet("foo", "bar"); means that only one of 'foo' * or 'bar' must be present, and if both appear then an exception will be thrown. Note * that this does not make 'foo' or 'bar' required - see {@link #declareRequiredFieldSet(String...)} * for required fields. - * + * <p>

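A sketch combining the two declarations discussed above (Request is a hypothetical value class with setFoo/setBar):

    ObjectParser<Request, Void> parser = new ObjectParser<>("request", Request::new);
    parser.declareString(Request::setFoo, new ParseField("foo"));
    parser.declareString(Request::setBar, new ParseField("bar"));
    parser.declareRequiredFieldSet("foo", "bar");  // at least one of the two must appear
    parser.declareExclusiveFieldSet("foo", "bar"); // but not both at once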
* Multiple exclusive sets may be declared * * @param exclusiveSet a set of field names, at most one of which must appear diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/MapXContentParser.java index 254c340f8836f..0a5cda324ddb7 100644 --- a/libs/core/src/main/java/org/opensearch/core/xcontent/MapXContentParser.java +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/MapXContentParser.java @@ -277,7 +277,7 @@ public Token currentToken() { /** * field name that the child element needs to inherit. - * + * <p>

* In most cases this is the same as currentName() except with embedded arrays. In "foo": [[42]] the first START_ARRAY * token will have the name "foo", but the second START_ARRAY will have no name. */ diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/XContentBuilder.java index a38bdd049ee88..976f353100c55 100644 --- a/libs/core/src/main/java/org/opensearch/core/xcontent/XContentBuilder.java +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/XContentBuilder.java @@ -726,7 +726,7 @@ public XContentBuilder value(byte[] value, int offset, int length) throws IOExce /** * Writes the binary content of the given byte array as UTF-8 bytes. - * + * <p>

* Use {@link XContentParser#charBuffer()} to read the value back */ public XContentBuilder utf8Value(byte[] bytes, int offset, int length) throws IOException { diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/XContentBuilderExtension.java index 0535da1a584be..9b13ebb23be86 100644 --- a/libs/core/src/main/java/org/opensearch/core/xcontent/XContentBuilderExtension.java +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/XContentBuilderExtension.java @@ -37,7 +37,7 @@ /** * This interface provides a way for non-JDK classes to plug in a way to serialize to xcontent. - * + * <p>

* It is greatly preferred that you implement {@link ToXContentFragment} * in the class for encoding, however, in some situations you may not own the * class, in which case you can add an implementation here for encoding it. @@ -63,7 +63,7 @@ public interface XContentBuilderExtension { * Used for plugging in a human readable version of a class's encoding. It is assumed that * the human readable equivalent is always behind the {@code toString()} method, so * this transformer returns the raw value to be used. - * + * <p>

        * An example implementation: * *

        @@ -79,7 +79,7 @@ public interface XContentBuilderExtension {
             /**
              * Used for plugging a transformer for a date or time type object into a String (or other
              * encodable object).
        -     *
        +     * 

        * For example: * *

        diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/XContentParser.java b/libs/core/src/main/java/org/opensearch/core/xcontent/XContentParser.java
        index a2f16209a5b7f..4bfd47ccfdc94 100644
        --- a/libs/core/src/main/java/org/opensearch/core/xcontent/XContentParser.java
        +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/XContentParser.java
        @@ -44,7 +44,7 @@
         
         /**
          * Interface for pull - parsing {@link XContent} see {@code XContentType} for supported types.
        - *
        + * 

        * To obtain an instance of this class use the following pattern: * *

        @@ -202,11 +202,11 @@  Map map(Supplier> mapFactory, CheckedFunction
              * Default implementation simply returns false since only actual
              * implementation class has knowledge of its internal buffering
              * state.
        -     *
        +     * 

        * This method shouldn't be used to check if the token contains text or not. */ boolean hasTextCharacters(); diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/XContentParserUtils.java b/libs/core/src/main/java/org/opensearch/core/xcontent/XContentParserUtils.java index 13e2f6a695d1b..b10be393f9adb 100644 --- a/libs/core/src/main/java/org/opensearch/core/xcontent/XContentParserUtils.java +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/XContentParserUtils.java @@ -142,10 +142,10 @@ public static Object parseFieldsValue(XContentParser parser) throws IOException * This method expects that the current field name is the concatenation of a type, a delimiter and a name * (ex: terms#foo where "terms" refers to the type of a registered {@link NamedXContentRegistry.Entry}, * "#" is the delimiter and "foo" the name of the object to parse). - * + *

        * It also expected that following this field name is either an Object or an array xContent structure and * the cursor points to the start token of this structure. - * + *

        * The method splits the field's name to extract the type and name and then parses the object * using the {@link XContentParser#namedObject(Class, String, Object)} method. * diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/XContentSubParser.java b/libs/core/src/main/java/org/opensearch/core/xcontent/XContentSubParser.java index d1cdda4aeb8be..337cf9f95fe5f 100644 --- a/libs/core/src/main/java/org/opensearch/core/xcontent/XContentSubParser.java +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/XContentSubParser.java @@ -43,7 +43,7 @@ /** * Wrapper for a XContentParser that makes a single object/array look like a complete document. - * + *

        * The wrapper prevents the parsing logic to consume tokens outside of the wrapped object as well * as skipping to the end of the object in case of a parsing error. The wrapper is intended to be * used for parsing objects that should be ignored if they are malformed. diff --git a/libs/dissect/src/main/java/org/opensearch/dissect/DissectParser.java b/libs/dissect/src/main/java/org/opensearch/dissect/DissectParser.java index 9861847c9e1ea..b6dc0ceb1028f 100644 --- a/libs/dissect/src/main/java/org/opensearch/dissect/DissectParser.java +++ b/libs/dissect/src/main/java/org/opensearch/dissect/DissectParser.java @@ -194,26 +194,25 @@ public DissectParser(String pattern, String appendSeparator) { * @throws DissectException if unable to dissect a pair into it's parts. */ public Map parse(String inputString) { - /** - * - * This implements a naive string matching algorithm. The string is walked left to right, comparing each byte against - * another string's bytes looking for matches. If the bytes match, then a second cursor looks ahead to see if all the bytes - * of the other string matches. If they all match, record it and advances the primary cursor to the match point. If it can not match - * all of the bytes then progress the main cursor. Repeat till the end of the input string. Since the string being searching for - * (the delimiter) is generally small and rare the naive approach is efficient. - * - * In this case the string that is walked is the input string, and the string being searched for is the current delimiter. - * For example for a dissect pattern of {@code %{a},%{b}:%{c}} the delimiters (comma then colon) are searched for in the - * input string. At class construction the list of keys+delimiters are found (dissectPairs), which allows the use of that ordered - * list to know which delimiter to use for the search. The delimiters is progressed once the current delimiter is matched. - * - * There are two special cases that requires additional parsing beyond the standard naive algorithm. Consecutive delimiters should - * results in a empty matches unless the {@code ->} is provided. For example given the dissect pattern of - * {@code %{a},%{b},%{c},%{d}} and input string of {@code foo,,,} the match should be successful with empty values for b,c and d. - * However, if the key modifier {@code ->}, is present it will simply skip over any delimiters just to the right of the key - * without assigning any values. For example {@code %{a->},{%b}} will match the input string of {@code foo,,,,,,bar} with a=foo and - * b=bar. - * + /* + + This implements a naive string matching algorithm. The string is walked left to right, comparing each byte against + another string's bytes looking for matches. If the bytes match, then a second cursor looks ahead to see if all the bytes + of the other string matches. If they all match, record it and advances the primary cursor to the match point. If it can not match + all of the bytes then progress the main cursor. Repeat till the end of the input string. Since the string being searching for + (the delimiter) is generally small and rare the naive approach is efficient. + + In this case the string that is walked is the input string, and the string being searched for is the current delimiter. + For example for a dissect pattern of {@code %{a},%{b}:%{c}} the delimiters (comma then colon) are searched for in the + input string. 
At class construction the list of keys+delimiters are found (dissectPairs), which allows the use of that ordered + list to know which delimiter to use for the search. The delimiters is progressed once the current delimiter is matched. + + There are two special cases that requires additional parsing beyond the standard naive algorithm. Consecutive delimiters should + results in a empty matches unless the {@code ->} is provided. For example given the dissect pattern of + {@code %{a},%{b},%{c},%{d}} and input string of {@code foo,,,} the match should be successful with empty values for b,c and d. + However, if the key modifier {@code ->}, is present it will simply skip over any delimiters just to the right of the key + without assigning any values. For example {@code %{a->},{%b}} will match the input string of {@code foo,,,,,,bar} with a=foo and + b=bar. */ DissectMatch dissectMatch = new DissectMatch(appendSeparator, maxMatches, maxResults, appendCount, referenceCount); Iterator it = matchPairs.iterator(); diff --git a/libs/geo/src/main/java/org/opensearch/geometry/Circle.java b/libs/geo/src/main/java/org/opensearch/geometry/Circle.java index cf1dce8966e1f..c05f316b53b9c 100644 --- a/libs/geo/src/main/java/org/opensearch/geometry/Circle.java +++ b/libs/geo/src/main/java/org/opensearch/geometry/Circle.java @@ -180,6 +180,7 @@ public int hashCode() { /** * Visit this circle with a {@link GeometryVisitor}. + * * @param visitor The visitor * @param The return type of the visitor * @param The exception type of the visitor diff --git a/libs/geo/src/main/java/org/opensearch/geometry/utils/BitUtil.java b/libs/geo/src/main/java/org/opensearch/geometry/utils/BitUtil.java index 664e7e68d96a5..c946cc2473202 100644 --- a/libs/geo/src/main/java/org/opensearch/geometry/utils/BitUtil.java +++ b/libs/geo/src/main/java/org/opensearch/geometry/utils/BitUtil.java @@ -48,8 +48,8 @@ public class BitUtil { // magic numbers for bit interleaving /** * Interleaves the first 32 bits of each long value - * - * Adapted from: http://graphics.stanford.edu/~seander/bithacks.html#InterleaveBMN + *

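Using the constructor and parse method shown in the DissectParser hunk above, the dissect flow is, as a sketch:

    DissectParser parser = new DissectParser("%{a},%{b}:%{c}", null);
    Map<String, String> fields = parser.parse("foo,bar:baz");
    // fields -> {a=foo, b=bar, c=baz}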
+ * Adapted from: <a href="http://graphics.stanford.edu/~seander/bithacks.html#InterleaveBMN">bithacks.html#InterleaveBMN</a> */ public static long interleave(int even, int odd) { long v1 = 0x00000000FFFFFFFFL & even; diff --git a/libs/geo/src/main/java/org/opensearch/geometry/utils/Geohash.java index 8b3b841e221e5..33c423e136613 100644 --- a/libs/geo/src/main/java/org/opensearch/geometry/utils/Geohash.java +++ b/libs/geo/src/main/java/org/opensearch/geometry/utils/Geohash.java @@ -39,12 +39,12 @@ /** * Utilities for converting to/from the GeoHash standard - * + * <p>

* The geohash long format is represented as lon/lat (x/y) interleaved with the 4 least significant bits * representing the level (1-12) [xyxy...xyxyllll] - * + * <p>

* This differs from a morton encoded value which interleaves lat/lon (y/x). - * + * <p>

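A sketch of the two encoding entry points (coordinates are illustrative; both take lon before lat, matching the format above):

    long encoded = Geohash.longEncode(-74.0060, 40.7128, 8); // lon, lat, level 1-12
    String hash = Geohash.stringEncode(-74.0060, 40.7128);   // full-precision geohash string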
        * NOTE: this will replace {@code org.opensearch.common.geo.GeoHashUtils} */ public class Geohash { diff --git a/libs/grok/src/main/java/org/opensearch/grok/Grok.java b/libs/grok/src/main/java/org/opensearch/grok/Grok.java index cd786b74be039..7aa3347ba4f4b 100644 --- a/libs/grok/src/main/java/org/opensearch/grok/Grok.java +++ b/libs/grok/src/main/java/org/opensearch/grok/Grok.java @@ -151,7 +151,7 @@ private void validatePatternBank() { /** * Checks whether patterns reference each other in a circular manner and, if so, fail with an exception. * Also checks for malformed pattern definitions and fails with an exception. - * + *

        * In a pattern, anything between %{ and } or : is considered * a reference to another named pattern. This method will navigate to all these named patterns and * check for a circular reference. diff --git a/libs/grok/src/main/java/org/opensearch/grok/MatcherWatchdog.java b/libs/grok/src/main/java/org/opensearch/grok/MatcherWatchdog.java index 5c7eaca2a634a..d5b7566ecc90f 100644 --- a/libs/grok/src/main/java/org/opensearch/grok/MatcherWatchdog.java +++ b/libs/grok/src/main/java/org/opensearch/grok/MatcherWatchdog.java @@ -44,7 +44,7 @@ * Protects against long running operations that happen between the register and unregister invocations. * Threads that invoke {@link #register(Matcher)}, but take too long to invoke the {@link #unregister(Matcher)} method * will be interrupted. - * + *

        * This is needed for Joni's {@link org.joni.Matcher#search(int, int, int)} method, because * it can end up spinning endlessly if the regular expression is too complex. Joni has checks * that for every 30k iterations it checks if the current thread is interrupted and if so diff --git a/libs/nio/src/main/java/org/opensearch/nio/ChannelContext.java b/libs/nio/src/main/java/org/opensearch/nio/ChannelContext.java index 797dfe859fa6c..0e29661978716 100644 --- a/libs/nio/src/main/java/org/opensearch/nio/ChannelContext.java +++ b/libs/nio/src/main/java/org/opensearch/nio/ChannelContext.java @@ -116,10 +116,10 @@ protected void handleException(Exception e) { /** * Schedules a channel to be closed by the selector event loop with which it is registered. - * + *

* If the channel is open and the state can be transitioned to closed, the close operation will * be scheduled with the event loop. - * + * <p>

        * Depending on the underlying protocol of the channel, a close operation might simply close the socket * channel or may involve reading and writing messages. */ diff --git a/libs/nio/src/main/java/org/opensearch/nio/NioSelector.java b/libs/nio/src/main/java/org/opensearch/nio/NioSelector.java index a38a33182afea..4ed745723515c 100644 --- a/libs/nio/src/main/java/org/opensearch/nio/NioSelector.java +++ b/libs/nio/src/main/java/org/opensearch/nio/NioSelector.java @@ -512,12 +512,12 @@ private void handleQueuedWrites() { * This is a convenience method to be called after some object (normally channels) are enqueued with this * selector. This method will check if the selector is still open. If it is open, normal operation can * proceed. - * + *

        * If the selector is closed, then we attempt to remove the object from the queue. If the removal * succeeds then we throw an {@link IllegalStateException} indicating that normal operation failed. If * the object cannot be removed from the queue, then the object has already been handled by the selector * and operation can proceed normally. - * + *

        * If this method is called from the selector thread, we will not allow the queuing to occur as the * selector thread can manipulate its queues internally even if it is no longer open. * diff --git a/libs/nio/src/main/java/org/opensearch/nio/SocketChannelContext.java b/libs/nio/src/main/java/org/opensearch/nio/SocketChannelContext.java index 12a1e80055823..3df8e42fe4f14 100644 --- a/libs/nio/src/main/java/org/opensearch/nio/SocketChannelContext.java +++ b/libs/nio/src/main/java/org/opensearch/nio/SocketChannelContext.java @@ -59,7 +59,7 @@ * that it is ready to perform certain operations (read, write, etc) the {@link SocketChannelContext} will * be called. This context will need to implement all protocol related logic. Additionally, if any special * close behavior is required, it should be implemented in this context. - * + *

        * The only methods of the context that should ever be called from a non-selector thread are * {@link #closeChannel()} and {@link #sendMessage(Object, BiConsumer)}. */ diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Tracer.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Tracer.java index 49295f417df9e..e6d4878a5e833 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Tracer.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Tracer.java @@ -16,7 +16,7 @@ /** * Tracer is the interface used to create a {@link Span} * It automatically handles the context propagation between threads, tasks, nodes etc. - * + *

        * All methods on the Tracer object are multi-thread safe. * * @opensearch.experimental diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/http/HttpTracer.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/http/HttpTracer.java index b0692f1b62a48..50d18c0a0d040 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/http/HttpTracer.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/http/HttpTracer.java @@ -18,7 +18,7 @@ /** * HttpTracer helps in creating a {@link Span} which reads the incoming tracing information * from the HttpRequest header and propagate the span accordingly. - * + *

        * All methods on the Tracer object are multi-thread safe. * * @opensearch.experimental diff --git a/libs/x-content/src/test/java/org/opensearch/common/xcontent/XContentParserTests.java b/libs/x-content/src/test/java/org/opensearch/common/xcontent/XContentParserTests.java index 3b857a2c690b9..d3d9ea174cf1b 100644 --- a/libs/x-content/src/test/java/org/opensearch/common/xcontent/XContentParserTests.java +++ b/libs/x-content/src/test/java/org/opensearch/common/xcontent/XContentParserTests.java @@ -628,7 +628,7 @@ public void testCreateRootSubParser() throws IOException { /** * Generates a random object {"first_field": "foo", "marked_field": {...random...}, "last_field": "bar} - * + *

        * Returns the number of tokens in the marked field */ private static int generateRandomObjectForMarking(XContentBuilder builder) throws IOException { diff --git a/modules/aggs-matrix-stats/src/main/java/org/opensearch/search/aggregations/matrix/stats/RunningStats.java b/modules/aggs-matrix-stats/src/main/java/org/opensearch/search/aggregations/matrix/stats/RunningStats.java index de67cc2930652..de6b59b1546a5 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/opensearch/search/aggregations/matrix/stats/RunningStats.java +++ b/modules/aggs-matrix-stats/src/main/java/org/opensearch/search/aggregations/matrix/stats/RunningStats.java @@ -46,7 +46,7 @@ /** * Descriptive stats gathered per shard. Coordinating node computes final correlation and covariance stats * based on these descriptive stats. This single pass, parallel approach is based on: - * + *

        * http://prod.sandia.gov/techlib/access-control.cgi/2008/086212.pdf */ public class RunningStats implements Writeable, Cloneable { @@ -222,7 +222,7 @@ private void updateCovariance(final String[] fieldNames, final Map * running computations taken from: http://prod.sandia.gov/techlib/access-control.cgi/2008/086212.pdf **/ public void merge(final RunningStats other) { diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java index b0d9c1765190a..edb8c37c2dbdd 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java @@ -555,7 +555,7 @@ public List getPreConfiguredTokenFilters() { filters.add(PreConfiguredTokenFilter.singleton("scandinavian_normalization", true, ScandinavianNormalizationFilter::new)); filters.add(PreConfiguredTokenFilter.singleton("shingle", false, false, input -> { TokenStream ts = new ShingleFilter(input); - /** + /* * We disable the graph analysis on this token stream * because it produces shingles of different size. * Graph analysis on such token stream is useless and dangerous as it may create too many paths diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/KeywordMarkerTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/KeywordMarkerTokenFilterFactory.java index ad968aeee62cb..e9f3fd96dd69d 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/KeywordMarkerTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/KeywordMarkerTokenFilterFactory.java @@ -49,12 +49,12 @@ * A factory for creating keyword marker token filters that prevent tokens from * being modified by stemmers. Two types of keyword marker filters are available: * the {@link SetKeywordMarkerFilter} and the {@link PatternKeywordMarkerFilter}. - * + *

* The {@link SetKeywordMarkerFilter} uses a set of keywords to denote which tokens * should be excluded from stemming. This filter is created if the settings include * {@code keywords}, which contains the list of keywords, or {@code `keywords_path`}, * which contains a path to a file in the config directory with the keywords. - * + * <p>

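A sketch of wiring the set-based variant through index settings (the filter name and keyword values are illustrative):

    Settings settings = Settings.builder()
        .put("index.analysis.filter.my_keywords.type", "keyword_marker")
        .putList("index.analysis.filter.my_keywords.keywords", "opensearch", "lucene")
        .build();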
        * The {@link PatternKeywordMarkerFilter} uses a regular expression pattern to match * against tokens that should be excluded from stemming. This filter is created if * the settings include {@code keywords_pattern}, which contains the regular expression diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/SnowballAnalyzer.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/SnowballAnalyzer.java index 78d151ee16c3b..04786689b50f0 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/SnowballAnalyzer.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/SnowballAnalyzer.java @@ -45,7 +45,7 @@ /** Filters {@link StandardTokenizer} with {@link * LowerCaseFilter}, {@link StopFilter} and {@link SnowballFilter}. - * + *

        * Available stemmers are listed in org.tartarus.snowball.ext. The name of a * stemmer is the part of the class name before "Stemmer", e.g., the stemmer in * {@link org.tartarus.snowball.ext.EnglishStemmer} is named "English". diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileValuesSource.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileValuesSource.java index 9149b8939b739..665ea6c5f2f37 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileValuesSource.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileValuesSource.java @@ -48,7 +48,7 @@ /** * A {@link SingleDimensionValuesSource} for geotile values. - * + *

        * Since geotile values can be represented as long values, this class is almost the same as {@link LongValuesSource} * The main differences is {@link GeoTileValuesSource#setAfter(Comparable)} as it needs to accept geotile string values i.e. "zoom/x/y". * diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/cells/BoundedCellValues.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/cells/BoundedCellValues.java index 588c8bc59c2e0..6ff38fa28978e 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/cells/BoundedCellValues.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/cells/BoundedCellValues.java @@ -37,7 +37,7 @@ /** * Class representing {@link CellValues} whose values are filtered * according to whether they are within the specified {@link GeoBoundingBox}. - * + *

        * The specified bounding box is assumed to be bounded. * * @opensearch.internal diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/FailProcessorException.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/FailProcessorException.java index 37320c0e900a5..7e114023fb86f 100644 --- a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/FailProcessorException.java +++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/FailProcessorException.java @@ -37,7 +37,7 @@ /** * Exception class thrown by {@link FailProcessor}. - * + *

        * This exception is caught in the {@link CompoundProcessor} and * then changes the state of {@link IngestDocument}. This * exception should get serialized. diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/ForEachProcessor.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/ForEachProcessor.java index 741a4fb29cfb8..b7c417f5f44a5 100644 --- a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/ForEachProcessor.java +++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/ForEachProcessor.java @@ -53,10 +53,10 @@ /** * A processor that for each value in a list executes a one or more processors. - * + *

* This can be useful in cases to do string operations on json array of strings, * or remove a field from objects inside a json array. - * + * <p>

        * Note that this processor is experimental. */ public final class ForEachProcessor extends AbstractProcessor implements WrappingProcessor { diff --git a/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionScriptEngine.java b/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionScriptEngine.java index 035d2402857e0..11b2e20eea523 100644 --- a/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionScriptEngine.java +++ b/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionScriptEngine.java @@ -73,7 +73,7 @@ /** * Provides the infrastructure for Lucene expressions as a scripting language for OpenSearch. - * + *

        * Only contexts returning numeric types or {@link Object} are supported. */ public class ExpressionScriptEngine implements ScriptEngine { diff --git a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MustacheScriptEngine.java b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MustacheScriptEngine.java index f4d7198dc2124..ec84475b70bb6 100644 --- a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MustacheScriptEngine.java +++ b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MustacheScriptEngine.java @@ -59,7 +59,7 @@ /** * Main entry point handling template registration, compilation and * execution. - * + *
<p>
        * Template handling is based on Mustache. Template handling is a two step * process: First compile the string representing the template, the resulting * {@link Mustache} object can then be re-used for subsequent executions. diff --git a/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/MustacheScriptEngineTests.java b/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/MustacheScriptEngineTests.java index 9e97863306148..fbb7d09709a91 100644 --- a/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/MustacheScriptEngineTests.java +++ b/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/MustacheScriptEngineTests.java @@ -200,7 +200,7 @@ private String getChars() { /** * From https://www.ietf.org/rfc/rfc4627.txt: - * + *
<p>
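To ground the two step template flow described a few lines up, here is a minimal sketch against the mustache.java compiler that this module builds on; the class and template names are illustrative only:

    import com.github.mustachejava.DefaultMustacheFactory;
    import com.github.mustachejava.Mustache;
    import com.github.mustachejava.MustacheFactory;

    import java.io.StringReader;
    import java.io.StringWriter;
    import java.util.Map;

    public class MustacheTwoStepSketch {
        public static void main(String[] args) {
            MustacheFactory factory = new DefaultMustacheFactory();
            // Step 1: compile the template string once.
            Mustache template = factory.compile(new StringReader("Hello {{name}}!"), "greeting");
            // Step 2: re-use the compiled template for each execution.
            StringWriter out = new StringWriter();
            template.execute(out, Map.of("name", "OpenSearch"));
            System.out.println(out); // Hello OpenSearch!
        }
    }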
        * All Unicode characters may be placed within the * quotation marks except for the characters that must be escaped: * quotation mark, reverse solidus, and the control characters (U+0000 diff --git a/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/Allowlist.java b/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/Allowlist.java index 56ade63efa5e2..265263b41ca89 100644 --- a/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/Allowlist.java +++ b/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/Allowlist.java @@ -42,7 +42,7 @@ * Allowlist contains data structures designed to be used to generate an allowlist of Java classes, * constructors, methods, and fields that can be used within a Painless script at both compile-time * and run-time. - * + *
<p>
        * A Allowlist consists of several pieces with {@link AllowlistClass}s as the top level. Each * {@link AllowlistClass} will contain zero-to-many {@link AllowlistConstructor}s, {@link AllowlistMethod}s, and * {@link AllowlistField}s which are what will be available with a Painless script. See each individual diff --git a/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/AllowlistClass.java b/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/AllowlistClass.java index 17e6814addf3b..67f5a07846c53 100644 --- a/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/AllowlistClass.java +++ b/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/AllowlistClass.java @@ -45,12 +45,12 @@ * classes. Though, since multiple allowlists may be combined into a single allowlist for a * specific context, as long as multiple classes representing the same Java class have the same * class name and have legal constructor/method overloading they can be merged together. - * + *
<p>
        * Classes in Painless allow for arity overloading for constructors and methods. Arity overloading * means that multiple constructors are allowed for a single class as long as they have a different * number of parameters, and multiples methods with the same name are allowed for a single class * as long as they have the same return type and a different number of parameters. - * + *
<p>
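The arity overloading rule described above can be illustrated with a plain Java sketch (hypothetical class, not part of the patch):

    public class ArityOverloadingExample {
        // Legal for a single allowlisted class: same name, different parameter counts.
        public int add(int a) {
            return a;
        }

        public int add(int a, int b) {
            return a + b;
        }
        // Two methods with the same name and the same number of parameters,
        // or the same name and arity but different return types, could not be merged.
    }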
        * Classes will automatically extend other allowlisted classes if the Java class they represent is a * subclass of other classes including Java interfaces. */ diff --git a/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/AllowlistLoader.java b/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/AllowlistLoader.java index 71265f82acacc..632fee9187eba 100644 --- a/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/AllowlistLoader.java +++ b/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/AllowlistLoader.java @@ -67,11 +67,11 @@ public static Allowlist loadFromResourceFiles(Class resource, String... filep * is the path of a single text file. The {@link Class}'s {@link ClassLoader} will be used to lookup the Java * reflection objects for each individual {@link Class}, {@link Constructor}, {@link Method}, and {@link Field} * specified as part of the allowlist in the text file. - * + *
<p>
        * A single pass is made through each file to collect all the information about each class, constructor, method, * and field. Most validation will be done at a later point after all allowlists have been gathered and their * merging takes place. - * + *
<p>
        * A painless type name is one of the following: *
<ul>
          *
        • def - The Painless dynamic type which is automatically included without a need to be @@ -129,13 +129,13 @@ public static Allowlist loadFromResourceFiles(Class resource, String... filep * be appropriately parsed and handled. Painless complex types must be specified with the * fully-qualified Java class name. Method argument types, method return types, and field types * must be specified with Painless type names (def, fully-qualified, or short) as described earlier. - * + *
<p>
          * The following example is used to create a single allowlist text file: * - * {@code + *
<pre>
                * # primitive types
                *
          -     * class int -> int {
          +     * class int -> int {
                * }
                *
                * # complex types
          @@ -161,7 +161,7 @@ public static Allowlist loadFromResourceFiles(Class resource, String... filep
                *   int value1
                *   def value2
                * }
          -     * }
+     * </pre>
          */ public static Allowlist loadFromResourceFiles(Class resource, Map parsers, String... filepaths) { List allowlistClasses = new ArrayList<>(); diff --git a/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/AllowlistMethod.java b/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/AllowlistMethod.java index 8bb0231ff3f4f..9fcaec3dbf7b6 100644 --- a/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/AllowlistMethod.java +++ b/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/AllowlistMethod.java @@ -45,7 +45,7 @@ * are using the '.' operator on an existing class variable/field. Painless classes may have multiple * methods with the same name as long as they comply with arity overloading described in * {@link AllowlistClass}. - * + *
<p>
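An augmented method, as described above, is simply a static method on any Java class whose first parameter is the class being augmented. A hedged sketch with hypothetical names:

    public final class ExampleAugmentation {
        // Static, with the augmented class (String) as the first parameter, so a
        // script could call it as if it were an instance method: "abc".firstCodePoint()
        public static int firstCodePoint(String receiver) {
            return receiver.codePointAt(0);
        }
    }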
          * Classes may also have additional methods that are not part of the Java class the class represents - * these are known as augmented methods. An augmented method can be added to a class as a part of any * Java class as long as the method is static and the first parameter of the method is the Java class diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/Compiler.java b/modules/lang-painless/src/main/java/org/opensearch/painless/Compiler.java index 35c676653fdc3..c19d4f361b2b6 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/Compiler.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/Compiler.java @@ -73,9 +73,7 @@ final class Compiler { */ private static final CodeSource CODESOURCE; - /** - * Setup the code privileges. - */ + /* Setup the code privileges. */ static { try { // Setup the code privileges. diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/lookup/PainlessLookupUtility.java b/modules/lang-painless/src/main/java/org/opensearch/painless/lookup/PainlessLookupUtility.java index cae425ad1fe3b..3164f5e6388c7 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/lookup/PainlessLookupUtility.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/lookup/PainlessLookupUtility.java @@ -42,13 +42,13 @@ /** * PainlessLookupUtility contains methods shared by {@link PainlessLookupBuilder}, {@link PainlessLookup}, and other classes within * Painless for conversion between type names and types along with some other various utility methods. - * + *
<p>
          * The following terminology is used for variable names throughout the lookup package: - * + *
<p>
          * A class is a set of methods and fields under a specific class name. A type is either a class or an array under a specific type name. * Note the distinction between class versus type is class means that no array classes will be be represented whereas type allows array * classes to be represented. The set of available classes will always be a subset of the available types. - * + *
<p>
          * Under ambiguous circumstances most variable names are prefixed with asm, java, or painless. If the variable value is the same for asm, * java, and painless, no prefix is used. Target is used as a prefix to represent if a constructor, method, or field is being * called/accessed on that specific class. Parameter is often a postfix used to represent if a type is used as a parameter to a diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/phase/PainlessSemanticAnalysisPhase.java b/modules/lang-painless/src/main/java/org/opensearch/painless/phase/PainlessSemanticAnalysisPhase.java index 04165f44ba212..8a05d6742af97 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/phase/PainlessSemanticAnalysisPhase.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/phase/PainlessSemanticAnalysisPhase.java @@ -126,9 +126,9 @@ public void visitFunction(SFunction userFunctionNode, ScriptScope scriptScope) { /** * Visits an expression that is also considered a statement. - * + *
<p>
          * If the statement is a return from the execute method, performs return value conversion. - * + *
<p>
          * Checks: control flow, type validation */ @Override @@ -168,9 +168,9 @@ public void visitExpression(SExpression userExpressionNode, SemanticScope semant /** * Visits a return statement and casts the value to the return type if possible. - * + *
<p>
          * If the statement is a return from the execute method, performs return value conversion. - * + *
<p>
          * Checks: type validation */ @Override diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/symbol/SemanticScope.java b/modules/lang-painless/src/main/java/org/opensearch/painless/symbol/SemanticScope.java index e27530d745e8f..5ac802038afa6 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/symbol/SemanticScope.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/symbol/SemanticScope.java @@ -49,7 +49,7 @@ * Tracks information within a scope required for compilation during the * semantic phase in the user tree. There are three types of scopes - * {@link FunctionScope}, {@link LambdaScope}, and {@link BlockScope}. - * + *
<p>
          * Scopes are stacked as they are created during the user tree's semantic * phase with each scope beyond the top-level containing a reference to * its parent. As a scope is no longer necessary, it's dropped automatically diff --git a/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapper.java b/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapper.java index 397a7b48b472a..366e848416328 100644 --- a/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapper.java @@ -82,7 +82,7 @@ /** * Mapper for a text field that optimizes itself for as-you-type completion by indexing its content into subfields. Each subfield * modifies the analysis chain of the root field to index terms the user would create as they type out the value in the root field - * + *
<p>
          * The structure of these fields is * *

          diff --git a/modules/parent-join/src/main/java/org/opensearch/join/query/HasChildQueryBuilder.java b/modules/parent-join/src/main/java/org/opensearch/join/query/HasChildQueryBuilder.java
          index 1a51259e9e4e4..e930780613ed6 100644
          --- a/modules/parent-join/src/main/java/org/opensearch/join/query/HasChildQueryBuilder.java
          +++ b/modules/parent-join/src/main/java/org/opensearch/join/query/HasChildQueryBuilder.java
          @@ -373,7 +373,7 @@ protected Query doToQuery(QueryShardContext context) throws IOException {
                * A query that rewrites into another query using
                * {@link JoinUtil#createJoinQuery(String, Query, Query, IndexSearcher, ScoreMode, OrdinalMap, int, int)}
                * that executes the actual join.
          -     *
+     * <p>
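The join call named above can be sketched as follows; the variable names are illustrative, and the production query derives all of these arguments from the mapping and the DirectoryReader of the current search:

    import java.io.IOException;

    import org.apache.lucene.index.OrdinalMap;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.join.JoinUtil;
    import org.apache.lucene.search.join.ScoreMode;

    class JoinQuerySketch {
        static Query joinChildrenToParents(Query childQuery, Query parentFilter, IndexSearcher searcher, OrdinalMap ordinalMap)
            throws IOException {
            return JoinUtil.createJoinQuery(
                "join_field",     // join field shared by parent and child documents
                childQuery,       // "from" side: matching child documents
                parentFilter,     // "to" side: candidate parent documents
                searcher,
                ScoreMode.None,   // how child scores fold into the parent score
                ordinalMap,       // global ordinals for the join field
                1,                // illustrative minimum number of children
                Integer.MAX_VALUE // illustrative maximum number of children
            );
        }
    }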
          * This query is exclusively used by the {@link HasChildQueryBuilder} and {@link HasParentQueryBuilder} to get access * to the {@link DirectoryReader} used by the current search in order to retrieve the {@link OrdinalMap}. * The {@link OrdinalMap} is required by {@link JoinUtil} to execute the join. diff --git a/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/TransportRankEvalAction.java b/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/TransportRankEvalAction.java index ffdea14e0873d..8e72c6ef06849 100644 --- a/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/TransportRankEvalAction.java +++ b/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/TransportRankEvalAction.java @@ -71,10 +71,10 @@ * supplied query parameters) against a set of possible search requests (read: * search specifications, expressed as query/search request templates) and * compares the result against a set of annotated documents per search intent. - * + *
<p>
          * If any documents are returned that haven't been annotated the document id of * those is returned per search intent. - * + *
<p>
          * The resulting search quality is computed in terms of precision at n and * returned for each search specification for the full set of search intents as * averaged precision at n. diff --git a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/DiscountedCumulativeGainTests.java b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/DiscountedCumulativeGainTests.java index fcf1fa489f740..d96e3212e05a2 100644 --- a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/DiscountedCumulativeGainTests.java +++ b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/DiscountedCumulativeGainTests.java @@ -67,7 +67,7 @@ public class DiscountedCumulativeGainTests extends OpenSearchTestCase { /** * Assuming the docs are ranked in the following order: - * + *
<p>
          * rank | relevance | 2^(relevance) - 1 | log_2(rank + 1) | (2^(relevance) - 1) / log_2(rank + 1) * ------------------------------------------------------------------------------------------- * 1 | 3 | 7.0 | 1.0 | 7.0 | 7.0 |  @@ -76,7 +76,7 @@ public class DiscountedCumulativeGainTests extends OpenSearchTestCase { * 4 | 0 | 0.0 | 2.321928094887362 | 0.0 * 5 | 1 | 1.0 | 2.584962500721156 | 0.38685280723454163 * 6 | 2 | 3.0 | 2.807354922057604 | 1.0686215613240666 - * + *
<p>
          * dcg = 13.84826362927298 (sum of last column) */ public void testDCGAt() { @@ -91,20 +91,20 @@ public void testDCGAt() { DiscountedCumulativeGain dcg = new DiscountedCumulativeGain(); assertEquals(EXPECTED_DCG, dcg.evaluate("id", hits, rated).metricScore(), DELTA); - /** - * Check with normalization: to get the maximal possible dcg, sort documents by - * relevance in descending order - * - * rank | relevance | 2^(relevance) - 1 | log_2(rank + 1) | (2^(relevance) - 1) / log_2(rank + 1) - * --------------------------------------------------------------------------------------- - * 1 | 3 | 7.0 | 1.0  | 7.0 - * 2 | 3 | 7.0 | 1.5849625007211563 | 4.416508275000202 - * 3 | 2 | 3.0 | 2.0  | 1.5 - * 4 | 2 | 3.0 | 2.321928094887362 | 1.2920296742201793 - * 5 | 1 | 1.0 | 2.584962500721156  | 0.38685280723454163 - * 6 | 0 | 0.0 | 2.807354922057604  | 0.0 - * - * idcg = 14.595390756454922 (sum of last column) + /* + Check with normalization: to get the maximal possible dcg, sort documents by + relevance in descending order + + rank | relevance | 2^(relevance) - 1 | log_2(rank + 1) | (2^(relevance) - 1) / log_2(rank + 1) + --------------------------------------------------------------------------------------- + 1 | 3 | 7.0 | 1.0  | 7.0 + 2 | 3 | 7.0 | 1.5849625007211563 | 4.416508275000202 + 3 | 2 | 3.0 | 2.0  | 1.5 + 4 | 2 | 3.0 | 2.321928094887362 | 1.2920296742201793 + 5 | 1 | 1.0 | 2.584962500721156  | 0.38685280723454163 + 6 | 0 | 0.0 | 2.807354922057604  | 0.0 + + idcg = 14.595390756454922 (sum of last column) */ dcg = new DiscountedCumulativeGain(true, null, 10); assertEquals(EXPECTED_NDCG, dcg.evaluate("id", hits, rated).metricScore(), DELTA); @@ -113,7 +113,7 @@ public void testDCGAt() { /** * This tests metric when some documents in the search result don't have a * rating provided by the user. - * + *
<p>
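The arithmetic in these tables reduces to a few lines of Java; a standalone sketch with a hypothetical helper, not part of the test class:

    // relevances[i] is the judged relevance of the hit at rank i + 1.
    static double dcg(int[] relevances) {
        double sum = 0d;
        for (int i = 0; i < relevances.length; i++) {
            int rank = i + 1;
            sum += (Math.pow(2, relevances[i]) - 1) / (Math.log(rank + 1) / Math.log(2));
        }
        return sum;
    }

For the first table above, dcg(new int[] { 3, 2, 3, 0, 1, 2 }) evaluates to 13.84826362927298, matching the documented sum, and the normalized variant divides that by the value of the ideal ordering (idcg).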
          * rank | relevance | 2^(relevance) - 1 | log_2(rank + 1) | (2^(relevance) - 1) / log_2(rank + 1) * ------------------------------------------------------------------------------------------- * 1 | 3 | 7.0 | 1.0 | 7.0 2 |  @@ -122,7 +122,7 @@ public void testDCGAt() { * 4 | n/a | n/a | n/a | n/a * 5 | 1 | 1.0 | 2.584962500721156 | 0.38685280723454163 * 6 | n/a | n/a | n/a | n/a - * + *
<p>
          * dcg = 12.779642067948913 (sum of last column) */ public void testDCGAtSixMissingRatings() { @@ -143,20 +143,20 @@ public void testDCGAtSixMissingRatings() { assertEquals(12.779642067948913, result.metricScore(), DELTA); assertEquals(2, filterUnratedDocuments(result.getHitsAndRatings()).size()); - /** - * Check with normalization: to get the maximal possible dcg, sort documents by - * relevance in descending order - * - * rank | relevance | 2^(relevance) - 1 | log_2(rank + 1) | (2^(relevance) - 1) / log_2(rank + 1) - * ---------------------------------------------------------------------------------------- - * 1 | 3 | 7.0 | 1.0  | 7.0 - * 2 | 3 | 7.0 | 1.5849625007211563 | 4.416508275000202 - * 3 | 2 | 3.0 | 2.0  | 1.5 - * 4 | 1 | 1.0 | 2.321928094887362   | 0.43067655807339 - * 5 | n.a | n.a | n.a.  | n.a. - * 6 | n.a | n.a | n.a  | n.a - * - * idcg = 13.347184833073591 (sum of last column) + /* + Check with normalization: to get the maximal possible dcg, sort documents by + relevance in descending order + + rank | relevance | 2^(relevance) - 1 | log_2(rank + 1) | (2^(relevance) - 1) / log_2(rank + 1) + ---------------------------------------------------------------------------------------- + 1 | 3 | 7.0 | 1.0  | 7.0 + 2 | 3 | 7.0 | 1.5849625007211563 | 4.416508275000202 + 3 | 2 | 3.0 | 2.0  | 1.5 + 4 | 1 | 1.0 | 2.321928094887362   | 0.43067655807339 + 5 | n.a | n.a | n.a.  | n.a. + 6 | n.a | n.a | n.a  | n.a + + idcg = 13.347184833073591 (sum of last column) */ dcg = new DiscountedCumulativeGain(true, null, 10); assertEquals(12.779642067948913 / 13.347184833073591, dcg.evaluate("id", hits, rated).metricScore(), DELTA); @@ -166,7 +166,7 @@ public void testDCGAtSixMissingRatings() { * This tests that normalization works as expected when there are more rated * documents than search hits because we restrict DCG to be calculated at the * fourth position - * + *
<p>
          * rank | relevance | 2^(relevance) - 1 | log_2(rank + 1) | (2^(relevance) - 1) / log_2(rank + 1) * ------------------------------------------------------------------------------------------- * 1 | 3 | 7.0 | 1.0 | 7.0 2 |  @@ -176,7 +176,7 @@ public void testDCGAtSixMissingRatings() { * ----------------------------------------------------------------- * 5 | 1 | 1.0 | 2.584962500721156 | 0.38685280723454163 * 6 | n/a | n/a | n/a | n/a - * + *
<p>
          * dcg = 12.392789260714371 (sum of last column until position 4) */ public void testDCGAtFourMoreRatings() { @@ -200,21 +200,21 @@ public void testDCGAtFourMoreRatings() { assertEquals(12.392789260714371, result.metricScore(), DELTA); assertEquals(1, filterUnratedDocuments(result.getHitsAndRatings()).size()); - /** - * Check with normalization: to get the maximal possible dcg, sort documents by - * relevance in descending order - * - * rank | relevance | 2^(relevance) - 1 | log_2(rank + 1) | (2^(relevance) - 1) / log_2(rank + 1) - * --------------------------------------------------------------------------------------- - * 1 | 3 | 7.0 | 1.0  | 7.0 - * 2 | 3 | 7.0 | 1.5849625007211563 | 4.416508275000202 - * 3 | 2 | 3.0 | 2.0  | 1.5 - * 4 | 1 | 1.0 | 2.321928094887362   | 0.43067655807339 - * --------------------------------------------------------------------------------------- - * 5 | n.a | n.a | n.a.  | n.a. - * 6 | n.a | n.a | n.a  | n.a - * - * idcg = 13.347184833073591 (sum of last column) + /* + Check with normalization: to get the maximal possible dcg, sort documents by + relevance in descending order + + rank | relevance | 2^(relevance) - 1 | log_2(rank + 1) | (2^(relevance) - 1) / log_2(rank + 1) + --------------------------------------------------------------------------------------- + 1 | 3 | 7.0 | 1.0  | 7.0 + 2 | 3 | 7.0 | 1.5849625007211563 | 4.416508275000202 + 3 | 2 | 3.0 | 2.0  | 1.5 + 4 | 1 | 1.0 | 2.321928094887362   | 0.43067655807339 + --------------------------------------------------------------------------------------- + 5 | n.a | n.a | n.a.  | n.a. + 6 | n.a | n.a | n.a  | n.a + + idcg = 13.347184833073591 (sum of last column) */ dcg = new DiscountedCumulativeGain(true, null, 10); assertEquals(12.392789260714371 / 13.347184833073591, dcg.evaluate("id", hits, ratedDocs).metricScore(), DELTA); diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/AbstractAsyncBulkByScrollAction.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/AbstractAsyncBulkByScrollAction.java index e2442e1f483f0..6ed486fbdb33b 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/AbstractAsyncBulkByScrollAction.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/AbstractAsyncBulkByScrollAction.java @@ -206,7 +206,7 @@ public abstract class AbstractAsyncBulkByScrollAction< /** * Build the {@link BiFunction} to apply to all {@link RequestWrapper}. - * + *
<p>
          * Public for testings.... */ public BiFunction, ScrollableHitSource.Hit, RequestWrapper> buildScriptApplier() { diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/BulkByScrollParallelizationHelper.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/BulkByScrollParallelizationHelper.java index d5a6e392f2019..7534de1408bcc 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/BulkByScrollParallelizationHelper.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/BulkByScrollParallelizationHelper.java @@ -63,14 +63,14 @@ private BulkByScrollParallelizationHelper() {} /** * Takes an action created by a {@link BulkByScrollTask} and runs it with regard to whether the request is sliced or not. - * + *
<p>
          * If the request is not sliced (i.e. the number of slices is 1), the worker action in the given {@link Runnable} will be started on * the local node. If the request is sliced (i.e. the number of slices is more than 1), then a subrequest will be created for each * slice and sent. - * + *
<p>
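The dispatch rule in the previous paragraph amounts to a small branch; a hedged sketch with illustrative names:

    import java.util.function.IntConsumer;

    final class SliceDispatchSketch {
        static void startSlicedAction(int slices, Runnable workerAction, IntConsumer sendSubRequestForSlice) {
            if (slices == 1) {
                workerAction.run(); // unsliced: run the worker on the local node
            } else {
                for (int slice = 0; slice < slices; slice++) {
                    sendSubRequestForSlice.accept(slice); // one subrequest per slice
                }
            }
        }
    }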
          * If slices are set as {@code "auto"}, this class will resolve that to a specific number based on characteristics of the source * indices. A request with {@code "auto"} slices may end up being sliced or unsliced. - * + *
<p>
          * This method is equivalent to calling {@link #initTaskState} followed by {@link #executeSlicedAction} */ static > void startSlicedAction( @@ -98,11 +98,11 @@ public void onFailure(Exception e) { /** * Takes an action and a {@link BulkByScrollTask} and runs it with regard to whether this task is a * leader or worker. - * + *
<p>
          * If this task is a worker, the worker action in the given {@link Runnable} will be started on the local * node. If the task is a leader (i.e. the number of slices is more than 1), then a subrequest will be * created for each slice and sent. - * + *
<p>
          * This method can only be called after the task state is initialized {@link #initTaskState}. */ static > void executeSlicedAction( @@ -125,7 +125,7 @@ static > void executeSliced /** * Takes a {@link BulkByScrollTask} and ensures that its initial task state (leader or worker) is set. - * + *
<p>
          * If slices are set as {@code "auto"}, this method will resolve that to a specific number based on * characteristics of the source indices. A request with {@code "auto"} slices may end up being sliced or * unsliced. This method does not execute the action. In order to execute the action see diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4HttpRequestSizeLimitIT.java b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4HttpRequestSizeLimitIT.java index 6c8ca665424a6..826d4a7e5d61e 100644 --- a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4HttpRequestSizeLimitIT.java +++ b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4HttpRequestSizeLimitIT.java @@ -57,7 +57,7 @@ /** * This test checks that in-flight requests are limited on HTTP level and that requests that are excluded from limiting can pass. - * + *
<p>
          * As the same setting is also used to limit in-flight requests on transport level, we avoid transport messages by forcing * a single node "cluster". */ diff --git a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpRequest.java b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpRequest.java index 7d937157c1034..3c96affb7adf7 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpRequest.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpRequest.java @@ -258,7 +258,7 @@ public FullHttpRequest nettyRequest() { /** * A wrapper of {@link HttpHeaders} that implements a map to prevent copying unnecessarily. This class does not support modifications * and due to the underlying implementation, it performs case insensitive lookups of key to values. - * + *
<p>
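A sketch of that wrapper idea against the Netty HttpHeaders API, with hypothetical names rather than the class in this patch:

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    import io.netty.handler.codec.http.HttpHeaders;

    final class HeadersMapViewSketch {
        private final HttpHeaders headers;

        HeadersMapViewSketch(HttpHeaders headers) {
            this.headers = headers;
        }

        // Case-insensitive lookup, delegating to Netty without copying.
        List<String> get(String name) {
            return headers.getAll(name);
        }

        // Materializes a full copy; the real wrapper pays a similar cost in
        // values() and entrySet(), as noted in the next paragraph.
        Map<String, List<String>> toMap() {
            Map<String, List<String>> copy = new HashMap<>();
            for (String name : headers.names()) {
                copy.put(name, headers.getAll(name));
            }
            return copy;
        }
    }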
          * It is important to note that this implementation does have some downsides in that each invocation of the * {@link #values()} and {@link #entrySet()} methods will perform a copy of the values in the HttpHeaders rather than returning a * view of the underlying values. diff --git a/modules/transport-netty4/src/main/java/org/opensearch/transport/CopyBytesSocketChannel.java b/modules/transport-netty4/src/main/java/org/opensearch/transport/CopyBytesSocketChannel.java index b73f74d187a2a..4bab91565d3ad 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/transport/CopyBytesSocketChannel.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/transport/CopyBytesSocketChannel.java @@ -65,7 +65,7 @@ * This class is adapted from {@link NioSocketChannel} class in the Netty project. It overrides the channel * read/write behavior to ensure that the bytes are always copied to a thread-local direct bytes buffer. This * happens BEFORE the call to the Java {@link SocketChannel} is issued. - * + *
<p>
          * The purpose of this class is to allow the disabling of netty direct buffer pooling while allowing us to * control how bytes end up being copied to direct memory. If we simply disabled netty pooling, we would rely * on the JDK's internal thread local buffer pooling. Instead, this class allows us to create a one thread diff --git a/plugins/analysis-phonetic/src/main/java/org/opensearch/index/analysis/phonetic/HaasePhonetik.java b/plugins/analysis-phonetic/src/main/java/org/opensearch/index/analysis/phonetic/HaasePhonetik.java index c80d32228feeb..b875fab7d4006 100644 --- a/plugins/analysis-phonetic/src/main/java/org/opensearch/index/analysis/phonetic/HaasePhonetik.java +++ b/plugins/analysis-phonetic/src/main/java/org/opensearch/index/analysis/phonetic/HaasePhonetik.java @@ -35,13 +35,13 @@ /** * Geänderter Algorithmus aus der Matching Toolbox von Rainer Schnell * Java-Programmierung von Jörg Reiher - * + *
<p>
          * Die Kölner Phonetik wurde für den Einsatz in Namensdatenbanken wie * der Verwaltung eines Krankenhauses durch Martin Haase (Institut für * Sprachwissenschaft, Universität zu Köln) und Kai Heitmann (Insitut für * medizinische Statistik, Informatik und Epidemiologie, Köln) überarbeitet. * M. Haase und K. Heitmann. Die Erweiterte Kölner Phonetik. 526, 2000. - * + *
<p>
          * nach: Martin Wilz, Aspekte der Kodierung phonetischer Ähnlichkeiten * in deutschen Eigennamen, Magisterarbeit. * http://www.uni-koeln.de/phil-fak/phonetik/Lehre/MA-Arbeiten/magister_wilz.pdf diff --git a/plugins/analysis-phonetic/src/main/java/org/opensearch/index/analysis/phonetic/KoelnerPhonetik.java b/plugins/analysis-phonetic/src/main/java/org/opensearch/index/analysis/phonetic/KoelnerPhonetik.java index 33e386af9f364..4bcc10ff73b0a 100644 --- a/plugins/analysis-phonetic/src/main/java/org/opensearch/index/analysis/phonetic/KoelnerPhonetik.java +++ b/plugins/analysis-phonetic/src/main/java/org/opensearch/index/analysis/phonetic/KoelnerPhonetik.java @@ -46,13 +46,13 @@ /** * Kölner Phonetik - * + *
<p>
          * H.J. Postel, Die Kölner Phonetik. Ein Verfahren zu Identifizierung * von Personennamen auf der Grundlage der Gestaltanalyse. IBM-Nachrichten 19 (1969), 925-931 - * + *
<p>
          * Algorithmus aus der Matching Toolbox von Rainer Schnell * Java-Programmierung von Jörg Reiher - * + *
<p>
          * mit Änderungen von Jörg Prante * */ diff --git a/plugins/analysis-phonetic/src/main/java/org/opensearch/index/analysis/phonetic/Nysiis.java b/plugins/analysis-phonetic/src/main/java/org/opensearch/index/analysis/phonetic/Nysiis.java index 818dbba85e2de..c3237114c65d5 100644 --- a/plugins/analysis-phonetic/src/main/java/org/opensearch/index/analysis/phonetic/Nysiis.java +++ b/plugins/analysis-phonetic/src/main/java/org/opensearch/index/analysis/phonetic/Nysiis.java @@ -40,7 +40,7 @@ /** * * Taken from commons-codec trunk (unreleased yet) - * + *
<p>
          * Encodes a string into a NYSIIS value. NYSIIS is an encoding used to relate * similar names, but can also be used as a general purpose scheme to find word * with similar phonemes. diff --git a/plugins/discovery-gce/src/test/java/org/opensearch/discovery/gce/GceDiscoveryTests.java b/plugins/discovery-gce/src/test/java/org/opensearch/discovery/gce/GceDiscoveryTests.java index b4af9773f33de..2208c78bef67a 100644 --- a/plugins/discovery-gce/src/test/java/org/opensearch/discovery/gce/GceDiscoveryTests.java +++ b/plugins/discovery-gce/src/test/java/org/opensearch/discovery/gce/GceDiscoveryTests.java @@ -66,7 +66,6 @@ * compute/v1/projects/[project-id]/zones/[zone] * * By default, project-id is the test method name, lowercase and missing the "test" prefix. - * * For example, if you create a test `myNewAwesomeTest` with following settings: * * Settings nodeSettings = Settings.builder() @@ -75,7 +74,6 @@ * .build(); * * You need to create a file under `src/test/resources/org/opensearch/discovery/gce/` named: - * * compute/v1/projects/mynewawesometest/zones/europe-west1-b/instances.json * */ diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/BasePerFieldCorrelationVectorsFormat.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/BasePerFieldCorrelationVectorsFormat.java index 7763b1e42d63e..00b55eb75995c 100644 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/BasePerFieldCorrelationVectorsFormat.java +++ b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/BasePerFieldCorrelationVectorsFormat.java @@ -27,12 +27,12 @@ public abstract class BasePerFieldCorrelationVectorsFormat extends PerFieldKnnVectorsFormat { /** * the hyper-parameters for constructing HNSW graphs. - * https://lucene.apache.org/core/9_4_0/core/org/apache/lucene/util/hnsw/HnswGraph.html + * HnswGraph.html */ public static final String METHOD_PARAMETER_M = "m"; /** * the hyper-parameters for constructing HNSW graphs. 
- * https://lucene.apache.org/core/9_4_0/core/org/apache/lucene/util/hnsw/HnswGraph.html + * HnswGraph.html */ public static final String METHOD_PARAMETER_EF_CONSTRUCTION = "ef_construction"; diff --git a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryBuilderTests.java b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryBuilderTests.java index 134c6519d7220..3e567d0c04e53 100644 --- a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryBuilderTests.java +++ b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryBuilderTests.java @@ -203,6 +203,7 @@ public void testDoToQueryInvalidFieldType() { /** * test serialization of Correlation Query Builder + * @throws Exception throws an IOException if serialization fails * @throws Exception Exception */ public void testSerialization() throws Exception { diff --git a/plugins/examples/custom-suggester/src/main/java/org/opensearch/example/customsuggester/CustomSuggestion.java b/plugins/examples/custom-suggester/src/main/java/org/opensearch/example/customsuggester/CustomSuggestion.java index 9255d3a2f299e..c8e9d8cfc0a12 100644 --- a/plugins/examples/custom-suggester/src/main/java/org/opensearch/example/customsuggester/CustomSuggestion.java +++ b/plugins/examples/custom-suggester/src/main/java/org/opensearch/example/customsuggester/CustomSuggestion.java @@ -53,7 +53,6 @@ public class CustomSuggestion extends Suggest.Suggestion /** * An integer representing the type of the suggestion formerly used for internal serialization over the network. - * * This class is now serialized as a NamedWriteable and this value only remains for backwards compatibility */ public static final int TYPE = 999; @@ -106,7 +105,7 @@ public int getWriteableType() { /** * A meaningless value used to test that plugin suggesters can add fields to their Suggestion types - * + *
<p>
          * This can't be serialized to xcontent because Suggestions appear in xcontent as an array of entries, so there is no place * to add a custom field. But we can still use a custom field internally and use it to define a Suggestion's behavior * diff --git a/plugins/examples/painless-allowlist/src/main/java/org/opensearch/example/painlessallowlist/ExampleAllowlistedClass.java b/plugins/examples/painless-allowlist/src/main/java/org/opensearch/example/painlessallowlist/ExampleAllowlistedClass.java index ca58d97377478..ee04741eb5a03 100644 --- a/plugins/examples/painless-allowlist/src/main/java/org/opensearch/example/painlessallowlist/ExampleAllowlistedClass.java +++ b/plugins/examples/painless-allowlist/src/main/java/org/opensearch/example/painlessallowlist/ExampleAllowlistedClass.java @@ -34,7 +34,7 @@ /** * An example of a class to be allowlisted for use by painless scripts - * + *
<p>
          * Each of the members and methods below are allowlisted for use in search scripts. * See example_allowlist.txt. */ diff --git a/plugins/examples/painless-allowlist/src/main/java/org/opensearch/example/painlessallowlist/ExampleAllowlistedInstance.java b/plugins/examples/painless-allowlist/src/main/java/org/opensearch/example/painlessallowlist/ExampleAllowlistedInstance.java index 0833f1f0c6659..35385e98187ea 100644 --- a/plugins/examples/painless-allowlist/src/main/java/org/opensearch/example/painlessallowlist/ExampleAllowlistedInstance.java +++ b/plugins/examples/painless-allowlist/src/main/java/org/opensearch/example/painlessallowlist/ExampleAllowlistedInstance.java @@ -34,7 +34,7 @@ /** * An example of an instance to be allowlisted for use by painless scripts. - * + *
<p>
          * Each of the members and methods below are allowlisted for use in search scripts but only from this instance. */ public class ExampleAllowlistedInstance { diff --git a/plugins/examples/painless-allowlist/src/main/java/org/opensearch/example/painlessallowlist/ExamplePainlessAnnotation.java b/plugins/examples/painless-allowlist/src/main/java/org/opensearch/example/painlessallowlist/ExamplePainlessAnnotation.java index 428a0cc688ad3..ac42c2b1cf9bc 100644 --- a/plugins/examples/painless-allowlist/src/main/java/org/opensearch/example/painlessallowlist/ExamplePainlessAnnotation.java +++ b/plugins/examples/painless-allowlist/src/main/java/org/opensearch/example/painlessallowlist/ExamplePainlessAnnotation.java @@ -34,7 +34,7 @@ /** * An example of an annotation to be allowlisted for use by painless scripts - * + *
<p>
          * The annotation below is allowlisted for use in search scripts. * See example_allowlist.txt. */ diff --git a/plugins/examples/painless-allowlist/src/main/java/org/opensearch/example/painlessallowlist/ExampleStaticMethodClass.java b/plugins/examples/painless-allowlist/src/main/java/org/opensearch/example/painlessallowlist/ExampleStaticMethodClass.java index 25443977a0f66..2f6d1ad9349bc 100644 --- a/plugins/examples/painless-allowlist/src/main/java/org/opensearch/example/painlessallowlist/ExampleStaticMethodClass.java +++ b/plugins/examples/painless-allowlist/src/main/java/org/opensearch/example/painlessallowlist/ExampleStaticMethodClass.java @@ -34,7 +34,7 @@ /** * An example of a class with static methods to be allowlisted for use by painless scripts - * + *
<p>
          * The method below is allowlisted for use in search scripts. * See example_allowlist.txt. */ diff --git a/plugins/mapper-annotated-text/src/main/java/org/opensearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java b/plugins/mapper-annotated-text/src/main/java/org/opensearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java index 935887261dcc9..952cff96860f2 100644 --- a/plugins/mapper-annotated-text/src/main/java/org/opensearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java +++ b/plugins/mapper-annotated-text/src/main/java/org/opensearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java @@ -73,9 +73,9 @@ import java.util.regex.Pattern; /** A {@link FieldMapper} for full-text fields with annotation markup e.g. - * + *
<p>
          * "New mayor is [John Smith](type=person&value=John%20Smith) " - * + *
<p>
          * A special Analyzer wraps the default choice of analyzer in order * to strip the text field of annotation markup and inject the related * entity annotation tokens as supplementary tokens at the relevant points diff --git a/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureBlobStoreRepositoryTests.java b/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureBlobStoreRepositoryTests.java index 82364d1b7b3c1..986720ec431fe 100644 --- a/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureBlobStoreRepositoryTests.java +++ b/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureBlobStoreRepositoryTests.java @@ -154,7 +154,7 @@ private static class AzureBlobStoreHttpHandler extends AzureHttpHandler implemen /** * HTTP handler that injects random Azure service errors - * + *
<p>
          * Note: it is not a good idea to allow this handler to simulate too many errors as it would * slow down the test suite. */ diff --git a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageService.java b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageService.java index 9dcc312f8f5a7..b60701ba5e533 100644 --- a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageService.java +++ b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageService.java @@ -212,8 +212,8 @@ protected PasswordAuthentication getPasswordAuthentication() { /** * The location mode is not there in v12 APIs anymore but it is possible to mimic its semantics using - * retry options and combination of primary / secondary endpoints. Refer to migration guide for mode details: - * https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/storage/azure-storage-blob/migrationGuides/V8_V12.md#miscellaneous + * retry options and combination of primary / secondary endpoints. Refer to + * migration guide for mode details: */ private BlobServiceClientBuilder applyLocationMode(final BlobServiceClientBuilder builder, final AzureStorageSettings settings) { final StorageConnectionString storageConnectionString = StorageConnectionString.create(settings.getConnectString(), logger); @@ -335,8 +335,8 @@ private void closeInternally(ClientState state) { } /** - * Implements HTTP pipeline policy to collect statistics on API calls. See please: - * https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/storage/azure-storage-blob/migrationGuides/V8_V12.md#miscellaneous + * Implements HTTP pipeline policy to collect statistics on API calls. See : + * migration guide */ private static class HttpStatsPolicy implements HttpPipelinePolicy { private final BiConsumer statsCollector; diff --git a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureStorageServiceTests.java b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureStorageServiceTests.java index 264888bb7da3a..bb0eafc7d1d4a 100644 --- a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureStorageServiceTests.java +++ b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureStorageServiceTests.java @@ -451,7 +451,8 @@ private static RequestRetryPolicy requestRetryOptions(BlobServiceClient client) } /** - * Extract the blob name from a URI like https://myservice.azure.net/container/path/to/myfile + * Extract the blob name from a URI like : + * {@code https://myservice.azure.net/container/path/to/myfile } * It should remove the container part (first part of the path) and gives path/to/myfile * @param uri URI to parse * @return The blob name relative to the container diff --git a/plugins/repository-gcs/src/internalClusterTest/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java b/plugins/repository-gcs/src/internalClusterTest/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java index f8e1a1cc39ae0..d223f7989c688 100644 --- a/plugins/repository-gcs/src/internalClusterTest/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java +++ b/plugins/repository-gcs/src/internalClusterTest/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java @@ -298,7 +298,7 @@ private static class GoogleCloudStorageBlobStoreHttpHandler extends GoogleCloudS /** * 
HTTP handler that injects random Google Cloud Storage service errors
- *
+ * <p>
          * Note: it is not a good idea to allow this handler to simulate too many errors as it would * slow down the test suite. */ diff --git a/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsSecurityContext.java b/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsSecurityContext.java index 068adc2cc9991..07d1d29eecfc4 100644 --- a/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsSecurityContext.java +++ b/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsSecurityContext.java @@ -53,7 +53,7 @@ /** * Oversees all the security specific logic for the HDFS Repository plugin. - * + *
<p>
          * Keeps track of the current user for a given repository, as well as which * permissions to grant the blob store restricted execution methods. */ diff --git a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsClientThreadLeakFilter.java b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsClientThreadLeakFilter.java index 2758bd020e979..856cdf1eb565e 100644 --- a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsClientThreadLeakFilter.java +++ b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsClientThreadLeakFilter.java @@ -42,7 +42,7 @@ * thread leaks out of the client and is picked up by the test framework. This thread filter is meant * to ignore the offending thread until a version of Hadoop is released that addresses the incorrect * interrupt handling. - * + *
<p>
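The filter contract being described is small; a hedged sketch against the randomizedtesting API (the real filter's matching logic may differ):

    import com.carrotsearch.randomizedtesting.ThreadFilter;

    public class ForkJoinLeakFilterSketch implements ThreadFilter {
        @Override
        public boolean reject(Thread t) {
            // Returning true tells the framework to ignore this thread
            // when checking for leaks after a test.
            return t.getName().startsWith("ForkJoinPool");
        }
    }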
          * In Hadoop 3.3.6, the org.apache.hadoop.fs.statistics.impl.EvaluatingStatisticsMap uses ForkJoinPool * to perform statistics calculation, leaving dangling workers. * diff --git a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java index 4e8e7720572e8..5f88ad7867513 100644 --- a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -225,7 +225,7 @@ private void validateAuthHeader(HttpExchange exchange) { /** * HTTP handler that injects random S3 service errors - * + *
<p>
          * Note: it is not a good idea to allow this handler to simulate too many errors as it would * slow down the test suite. */ diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3ClientSettings.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3ClientSettings.java index 933136228b1bb..2392c66329e06 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3ClientSettings.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3ClientSettings.java @@ -446,7 +446,7 @@ S3ClientSettings refine(Settings repositorySettings) { /** * Load all client settings from the given settings. - * + *
<p>
          * Note this will always at least return a client named "default". */ static Map load(final Settings settings, final Path configPath) { diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RetryingInputStream.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RetryingInputStream.java index 3a35f6135f28b..d7e47e0ab1bcc 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RetryingInputStream.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RetryingInputStream.java @@ -54,7 +54,7 @@ * Wrapper around an S3 object that will retry the {@link GetObjectRequest} if the download fails part-way through, resuming from where * the failure occurred. This should be handled by the SDK but it isn't today. This should be revisited in the future (e.g. before removing * the {@code LegacyESVersion#V_7_0_0} version constant) and removed when the SDK handles retries itself. - * + *
<p>
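A hedged sketch of that resume-on-failure idea; openAt is hypothetical and stands in for issuing a ranged GetObjectRequest in the real class:

    import java.io.IOException;
    import java.io.InputStream;

    abstract class RetryingInputStreamSketch extends InputStream {
        private InputStream current;
        private long position; // bytes successfully read so far
        private int attempts;

        protected abstract InputStream openAt(long offset) throws IOException;

        @Override
        public int read() throws IOException {
            while (true) {
                try {
                    if (current == null) {
                        current = openAt(position); // resume from the last good offset
                    }
                    int b = current.read();
                    if (b != -1) {
                        position++;
                    }
                    return b;
                } catch (IOException e) {
                    if (++attempts > 3) {
                        throw e; // give up after a few attempts
                    }
                    if (current != null) {
                        try {
                            current.close();
                        } catch (IOException ignored) {
                            // best effort close before reopening
                        }
                        current = null;
                    }
                }
            }
        }
    }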
          * See https://github.com/aws/aws-sdk-java/issues/856 for the related SDK issue */ class S3RetryingInputStream extends InputStream { diff --git a/plugins/store-smb/src/test/java/org/opensearch/index/store/SmbMMapDirectoryTests.java b/plugins/store-smb/src/test/java/org/opensearch/index/store/SmbMMapDirectoryTests.java index 2cac58262c75a..e1655cc5e0784 100644 --- a/plugins/store-smb/src/test/java/org/opensearch/index/store/SmbMMapDirectoryTests.java +++ b/plugins/store-smb/src/test/java/org/opensearch/index/store/SmbMMapDirectoryTests.java @@ -47,9 +47,9 @@ protected Directory getDirectory(Path file) throws IOException { @Override public void testCreateOutputForExistingFile() throws IOException { - /** - * This test is disabled because {@link SmbDirectoryWrapper} opens existing file - * with an explicit StandardOpenOption.TRUNCATE_EXISTING option. + /* + This test is disabled because {@link SmbDirectoryWrapper} opens existing file + with an explicit StandardOpenOption.TRUNCATE_EXISTING option. */ } } diff --git a/plugins/store-smb/src/test/java/org/opensearch/index/store/SmbNIOFSDirectoryTests.java b/plugins/store-smb/src/test/java/org/opensearch/index/store/SmbNIOFSDirectoryTests.java index 7390759029dfc..6f821147c3079 100644 --- a/plugins/store-smb/src/test/java/org/opensearch/index/store/SmbNIOFSDirectoryTests.java +++ b/plugins/store-smb/src/test/java/org/opensearch/index/store/SmbNIOFSDirectoryTests.java @@ -26,9 +26,9 @@ protected Directory getDirectory(Path file) throws IOException { @Override public void testCreateOutputForExistingFile() throws IOException { - /** - * This test is disabled because {@link SmbDirectoryWrapper} opens existing file - * with an explicit StandardOpenOption.TRUNCATE_EXISTING option. + /* + This test is disabled because {@link SmbDirectoryWrapper} opens existing file + with an explicit StandardOpenOption.TRUNCATE_EXISTING option. */ } } diff --git a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpRequest.java b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpRequest.java index d25ef33c2ce29..5abd6f2710198 100644 --- a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpRequest.java +++ b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpRequest.java @@ -257,7 +257,7 @@ public FullHttpRequest nettyRequest() { /** * A wrapper of {@link HttpHeaders} that implements a map to prevent copying unnecessarily. This class does not support modifications * and due to the underlying implementation, it performs case insensitive lookups of key to values. - * + *
<p>
          * It is important to note that this implementation does have some downsides in that each invocation of the * {@link #values()} and {@link #entrySet()} methods will perform a copy of the values in the HttpHeaders rather than returning a * view of the underlying values. diff --git a/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/QueryBuilderBWCIT.java b/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/QueryBuilderBWCIT.java index aabc3aee8887f..2675e9b62de35 100644 --- a/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/QueryBuilderBWCIT.java +++ b/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/QueryBuilderBWCIT.java @@ -78,7 +78,7 @@ * PercolatorFieldMapper#createQueryBuilderField(...) method). Using the query builders writable contract. This test * does best effort verifying that we don't break bwc for query builders between the first previous major version and * the latest current major release. - * + *
<p>
          * The queries to test are specified in json format, which turns out to work because we tend break here rarely. If the * json format of a query being tested here then feel free to change this. */ diff --git a/qa/no-bootstrap-tests/src/test/java/org/opensearch/bootstrap/SpawnerNoBootstrapTests.java b/qa/no-bootstrap-tests/src/test/java/org/opensearch/bootstrap/SpawnerNoBootstrapTests.java index c3c332aecfd4c..8ca90791f649e 100644 --- a/qa/no-bootstrap-tests/src/test/java/org/opensearch/bootstrap/SpawnerNoBootstrapTests.java +++ b/qa/no-bootstrap-tests/src/test/java/org/opensearch/bootstrap/SpawnerNoBootstrapTests.java @@ -65,7 +65,7 @@ /** * Create a simple "daemon controller", put it in the right place and check that it runs. - * + *
<p>
          * Extends LuceneTestCase rather than OpenSearchTestCase as OpenSearchTestCase installs a system call filter, and * that prevents the Spawner class from doing its job. Also needs to run in a separate JVM to other * tests that extend OpenSearchTestCase for the same reason. diff --git a/qa/os/src/test/java/org/opensearch/packaging/test/PackagingTestCase.java b/qa/os/src/test/java/org/opensearch/packaging/test/PackagingTestCase.java index 02a613be320c2..4bb3877fc04a8 100644 --- a/qa/os/src/test/java/org/opensearch/packaging/test/PackagingTestCase.java +++ b/qa/os/src/test/java/org/opensearch/packaging/test/PackagingTestCase.java @@ -441,7 +441,7 @@ public static Path createTempDir(String prefix) throws IOException { /** * Run the given action with a temporary copy of the config directory. - * + *
<p>
          * Files under the path passed to the action may be modified as necessary for the * test to execute, and running OpenSearch with {@link #startOpenSearch()} will * use the temporary directory. diff --git a/qa/os/src/test/java/org/opensearch/packaging/util/FileMatcher.java b/qa/os/src/test/java/org/opensearch/packaging/util/FileMatcher.java index 7904d1a046916..958de24848178 100644 --- a/qa/os/src/test/java/org/opensearch/packaging/util/FileMatcher.java +++ b/qa/os/src/test/java/org/opensearch/packaging/util/FileMatcher.java @@ -51,7 +51,7 @@ /** * Asserts that a file at a path matches its status as Directory/File, and its owner. If on a posix system, also matches the permission * set is what we expect. - * + *
<p>
          * This class saves information about its failed matches in instance variables and so instances should not be reused */ public class FileMatcher extends TypeSafeMatcher { diff --git a/qa/os/src/test/java/org/opensearch/packaging/util/Installation.java b/qa/os/src/test/java/org/opensearch/packaging/util/Installation.java index 25cefa948ff10..26af39d66cad3 100644 --- a/qa/os/src/test/java/org/opensearch/packaging/util/Installation.java +++ b/qa/os/src/test/java/org/opensearch/packaging/util/Installation.java @@ -137,7 +137,7 @@ public static Installation ofContainer(Shell sh, Distribution distribution) { /** * Returns the user that owns this installation. - * + *
<p>
          * For packages this is root, and for archives it is the user doing the installation. */ public String getOwner() { diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java index aef363058b394..f963f8d221bb5 100644 --- a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java +++ b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java @@ -98,11 +98,11 @@ private void waitForSearchableDocs(String index, int shardCount, int replicaCoun // Verify segment store assertBusy(() -> { - /** - * Use default tabular output and sort response based on shard,segment,primaryOrReplica columns to allow line by - * line parsing where records related to a segment (e.g. _0) are chunked together with first record belonging - * to primary while remaining *replicaCount* records belongs to replica copies - * */ + /* + Use default tabular output and sort response based on shard,segment,primaryOrReplica columns to allow line by + line parsing where records related to a segment (e.g. _0) are chunked together with first record belonging + to primary while remaining *replicaCount* records belongs to replica copies + */ Request segrepStatsRequest = new Request("GET", "/_cat/segments/" + index + "?s=shard,segment,primaryOrReplica"); segrepStatsRequest.addParameter("h", "index,shard,primaryOrReplica,segment,docs.count"); Response segrepStatsResponse = client().performRequest(segrepStatsRequest); @@ -259,7 +259,8 @@ public void testIndexing() throws Exception { * This test verifies that during rolling upgrades the segment replication does not break when replica shards can * be running on older codec versions. * - * @throws Exception exception + * @throws Exception if index creation fail + * @throws UnsupportedOperationException if cluster type is unknown */ @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/7679") public void testIndexingWithSegRep() throws Exception { diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/HotThreadsIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/HotThreadsIT.java index 6343bd127c458..4c9f49df71257 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/HotThreadsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/HotThreadsIT.java @@ -60,8 +60,8 @@ public class HotThreadsIT extends OpenSearchIntegTestCase { public void testHotThreadsDontFail() throws ExecutionException, InterruptedException { - /** - * This test just checks if nothing crashes or gets stuck etc. + /* + This test just checks if nothing crashes or gets stuck etc. 
*/ createIndex("test"); final int iters = scaledRandomIntBetween(2, 20); diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/AbstractTasksIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/AbstractTasksIT.java index 0197ccf059737..44ba585016d8e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/AbstractTasksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/AbstractTasksIT.java @@ -112,7 +112,7 @@ protected int numberOfEvents(String actionMasks, Function findEvents(String actionMasks, Function, Boolean> criteria) { List events = new ArrayList<>(); diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/ConcurrentSearchTasksIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/ConcurrentSearchTasksIT.java index c733329a1b5f7..e6fd9139d45f2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/ConcurrentSearchTasksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/ConcurrentSearchTasksIT.java @@ -30,7 +30,7 @@ /** * Integration tests for task management API with Concurrent Segment Search - * + *
<p>
          * The way the test framework bootstraps the test cluster makes it difficult to parameterize the feature flag. * Once concurrent search is moved behind a cluster setting we can parameterize these tests behind the setting. */ @@ -72,7 +72,7 @@ protected Settings featureFlagSettings() { /** * Tests the number of threads that worked on a search task. - * + *
<p>
          * Currently, we try to control concurrency by creating an index with 7 segments and rely on * the way concurrent search creates leaf slices from segments. Once more concurrency controls are introduced * we should improve this test to use those methods. diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/repositories/RepositoryBlocksIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/repositories/RepositoryBlocksIT.java index aff7c5d9876ac..36fe3748e9d10 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/repositories/RepositoryBlocksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/repositories/RepositoryBlocksIT.java @@ -46,7 +46,7 @@ /** * This class tests that repository operations (Put, Delete, Verify) are blocked when the cluster is read-only. - * + *
<p>
          * The @NodeScope TEST is needed because this class updates the cluster setting "cluster.blocks.read_only". */ @ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST) diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java index 347011721c728..78fb01b07b6b1 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java @@ -53,7 +53,7 @@ /** * This class tests that snapshot operations (Create, Delete, Restore) are blocked when the cluster is read-only. - * + *
<p>
          * The @NodeScope TEST is needed because this class updates the cluster setting "cluster.blocks.read_only". */ @ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST) diff --git a/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkProcessorRetryIT.java b/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkProcessorRetryIT.java index 737c0acc309fd..cd6cb0ca3b172 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkProcessorRetryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkProcessorRetryIT.java @@ -194,7 +194,7 @@ private static void indexDocs(BulkProcessor processor, int numDocs) { /** * Internal helper class to correlate backoff states with bulk responses. This is needed to check whether we maxed out the number * of retries but still got rejected (which is perfectly fine and can also happen from time to time under heavy load). - * + *
<p>
          * This implementation relies on an implementation detail in Retry, namely that the bulk listener is notified on the same thread * as the last call to the backoff policy's iterator. The advantage is that this is non-invasive to the rest of the production code. */ diff --git a/server/src/internalClusterTest/java/org/opensearch/action/ingest/AsyncIngestProcessorIT.java b/server/src/internalClusterTest/java/org/opensearch/action/ingest/AsyncIngestProcessorIT.java index c62c61d5919d6..aefabcb9bc14f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/ingest/AsyncIngestProcessorIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/ingest/AsyncIngestProcessorIT.java @@ -69,7 +69,7 @@ /** * The purpose of this test is to verify that when a processor executes an operation asynchronously that * the expected result is the same as if the same operation happens synchronously. - * + *
<p>
          * In this test two test processor are defined that basically do the same operation, but a single processor * executes asynchronously. The result of the operation should be the same and also the order in which the * bulk responses are returned should be the same as how the corresponding index requests were defined. diff --git a/server/src/internalClusterTest/java/org/opensearch/action/search/TransportSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/action/search/TransportSearchIT.java index f0a3b5a5901ce..b1934f901ac65 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/search/TransportSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/search/TransportSearchIT.java @@ -109,8 +109,8 @@ public List getAggregations() { @Override public List getFetchSubPhases(FetchPhaseConstructionContext context) { - /** - * Set up a fetch sub phase that throws an exception on indices whose name that start with "boom". + /* + Set up a fetch sub phase that throws an exception on indices whose name that start with "boom". */ return Collections.singletonList(fetchContext -> new FetchSubPhaseProcessor() { @Override diff --git a/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterDisruptionIT.java index 38b86d307d197..737b272613a44 100644 --- a/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterDisruptionIT.java @@ -461,7 +461,7 @@ public boolean validateClusterForming() { /** * Tests that indices are properly deleted even if there is a cluster-manager transition in between. - * Test for https://github.com/elastic/elasticsearch/issues/11665 + * Test for Elasticsearch issue #11665 */ public void testIndicesDeleted() throws Exception { final String idxName = "test"; diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java index bdefd7a5e199a..f485d4e402b41 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java @@ -91,7 +91,7 @@ public void testGlobalPrimaryAllocation() throws Exception { /** * This test verifies the happy path where primary shard allocation is balanced when multiple indices are created. - * + *
<p>
          * This test in general passes without primary shard balance as well due to nature of allocation algorithm which * assigns all primary shards first followed by replica copies. */ diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseIndexIT.java index ae88dd76d54e0..547f9e7a8d380 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseIndexIT.java @@ -509,7 +509,7 @@ public Settings onNodeStopped(String nodeName) throws Exception { } /** - * Test for https://github.com/elastic/elasticsearch/issues/47276 which checks that the persisted metadata on a data node does not + * Test for Elasticsearch issue #47276 which checks that the persisted metadata on a data node does not * become inconsistent when using replicated closed indices. */ public void testRelocatedClosedIndexIssue() throws Exception { diff --git a/server/src/internalClusterTest/java/org/opensearch/recovery/FullRollingRestartIT.java b/server/src/internalClusterTest/java/org/opensearch/recovery/FullRollingRestartIT.java index f636185fd4649..d28df90216beb 100644 --- a/server/src/internalClusterTest/java/org/opensearch/recovery/FullRollingRestartIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/recovery/FullRollingRestartIT.java @@ -198,11 +198,11 @@ public void testNoRebalanceOnRollingRestart() throws Exception { // see https://github.com/elastic/elasticsearch/issues/14387 internalCluster().startClusterManagerOnlyNode(Settings.EMPTY); internalCluster().startDataOnlyNodes(3); - /** - * We start 3 nodes and a dedicated cluster-manager. Restart on of the data-nodes and ensure that we got no relocations. - * Yet we have 6 shards 0 replica so that means if the restarting node comes back both other nodes are subject - * to relocating to the restarting node since all had 2 shards and now one node has nothing allocated. - * We have a fix for this to wait until we have allocated unallocated shards now so this shouldn't happen. + /* + We start 3 nodes and a dedicated cluster-manager. Restart on of the data-nodes and ensure that we got no relocations. + Yet we have 6 shards 0 replica so that means if the restarting node comes back both other nodes are subject + to relocating to the restarting node since all had 2 shards and now one node has nothing allocated. + We have a fix for this to wait until we have allocated unallocated shards now so this shouldn't happen. */ prepareCreate("test").setSettings( Settings.builder() diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramIT.java index 895e24ba65132..ee94e574228df 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramIT.java @@ -1481,7 +1481,7 @@ public void testExceptionOnNegativeInterval() { /** * https://github.com/elastic/elasticsearch/issues/31760 shows an edge case where an unmapped "date" field in two indices * that are queried simultaneously can lead to the "format" parameter in the aggregation not being preserved correctly. - * + *
<p>
          * The error happens when the bucket from the "unmapped" index is received first in the reduce phase, however the case can * be recreated when aggregating about a single index with an unmapped date field and also getting "empty" buckets. */ diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MaxBucketIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MaxBucketIT.java index bb90c1294ecb8..dc3b690c7f78f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MaxBucketIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MaxBucketIT.java @@ -544,13 +544,13 @@ public void testNested() throws Exception { /** * https://github.com/elastic/elasticsearch/issues/33514 - * + *
<p>
          * This bug manifests as the max_bucket agg ("peak") being added to the response twice, because * the pipeline agg is run twice. This makes invalid JSON and breaks conversion to maps. * The bug was caused by an UnmappedTerms being the chosen as the first reduction target. UnmappedTerms * delegated reduction to the first non-unmapped agg, which would reduce and run pipeline aggs. But then * execution returns to the UnmappedTerms and _it_ runs pipelines as well, doubling up on the values. - * + *
<p>
          * Applies to any pipeline agg, not just max. */ public void testFieldIsntWrittenOutTwice() throws Exception { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MovAvgIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MovAvgIT.java index 0cf89778c6e99..2aad0d2d38901 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MovAvgIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MovAvgIT.java @@ -1168,7 +1168,7 @@ public void testHoltWintersMinimization() { * the default settings. Which means our mock histo will match the generated result (which it won't * if the minimizer is actually working, since the coefficients will be different and thus generate different * data) - * + *
<p>
          * We can simulate this by setting the window size == size of histo */ public void testMinimizeNotEnoughData() { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/pit/DeletePitMultiNodeIT.java b/server/src/internalClusterTest/java/org/opensearch/search/pit/DeletePitMultiNodeIT.java index 43b7179a335f8..4a178e7066846 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/pit/DeletePitMultiNodeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/pit/DeletePitMultiNodeIT.java @@ -93,8 +93,8 @@ public void testDeletePit() throws Exception { assertTrue(deletePitInfo.isSuccessful()); } validatePitStats("index", 0, 10); - /** - * Checking deleting the same PIT id again results in succeeded + /* + Checking deleting the same PIT id again results in succeeded */ deleteExecute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); deletePITResponse = deleteExecute.get(); @@ -113,8 +113,8 @@ public void testDeletePitWithValidAndDeletedIds() throws Exception { pitIds.add(pitResponse.getId()); validatePitStats("index", 5, 0); - /** - * Delete Pit #1 + /* + Delete Pit #1 */ DeletePitRequest deletePITRequest = new DeletePitRequest(pitIds); ActionFuture deleteExecute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); @@ -128,8 +128,8 @@ public void testDeletePitWithValidAndDeletedIds() throws Exception { pitResponse = execute.get(); pitIds.add(pitResponse.getId()); validatePitStats("index", 5, 5); - /** - * Delete PIT with both Ids #1 (which is deleted) and #2 (which is present) + /* + Delete PIT with both Ids #1 (which is deleted) and #2 (which is present) */ deletePITRequest = new DeletePitRequest(pitIds); deleteExecute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); @@ -165,9 +165,9 @@ public void testDeleteAllPits() throws Exception { validatePitStats("index1", 5, 0); DeletePitRequest deletePITRequest = new DeletePitRequest("_all"); - /** - * When we invoke delete again, returns success after clearing the remaining readers. Asserting reader context - * not found exceptions don't result in failures ( as deletion in one node is successful ) + /* + When we invoke delete again, returns success after clearing the remaining readers. Asserting reader context + not found exceptions don't result in failures ( as deletion in one node is successful ) */ ActionFuture execute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); DeletePitResponse deletePITResponse = execute.get(); @@ -207,9 +207,9 @@ public Settings onNodeStopped(String nodeName) throws Exception { }); ensureGreen(); - /** - * When we invoke delete again, returns success after clearing the remaining readers. Asserting reader context - * not found exceptions don't result in failures ( as deletion in one node is successful ) + /* + When we invoke delete again, returns success after clearing the remaining readers. Asserting reader context + not found exceptions don't result in failures ( as deletion in one node is successful ) */ ActionFuture execute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); DeletePitResponse deletePITResponse = execute.get(); @@ -242,9 +242,9 @@ public Settings onNodeStopped(String nodeName) throws Exception { } }); ensureGreen(); - /** - * When we invoke delete again, returns success as all readers are cleared. (Delete all on node which is Up and - * once the node restarts, all active contexts are cleared in the node ) + /* + When we invoke delete again, returns success as all readers are cleared. 
(Delete all on node which is Up and + once the node restarts, all active contexts are cleared in the node ) */ ActionFuture execute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); DeletePitResponse deletePITResponse = execute.get(); @@ -278,8 +278,8 @@ public void testDeleteWhileSearch() throws Exception { } } } catch (Exception e) { - /** - * assert for exception once delete pit goes through. throw error in case of any exeption before that. + /* + assert for exception once delete pit goes through. throw error in case of any exeption before that. */ if (deleted.get() == true) { Throwable t = ExceptionsHelper.unwrapCause(e.getCause()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/searchafter/SearchAfterIT.java b/server/src/internalClusterTest/java/org/opensearch/search/searchafter/SearchAfterIT.java index 00ac574b8bd72..b99f66850e9e3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/searchafter/SearchAfterIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/searchafter/SearchAfterIT.java @@ -220,8 +220,8 @@ public void testPitWithSearchAfter() throws Exception { .setPointInTime(new PointInTimeBuilder(pitResponse.getId())) .get(); assertEquals(3, sr.getHits().getHits().length); - /** - * Add new data and assert PIT results remain the same and normal search results gets refreshed + /* + Add new data and assert PIT results remain the same and normal search results gets refreshed */ indexRandom(true, client().prepareIndex("test").setId("4").setSource("field1", 102)); sr = client().prepareSearch() diff --git a/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceSortBuilderIT.java b/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceSortBuilderIT.java index 7880fc24fd846..1b8bd9694483d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceSortBuilderIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceSortBuilderIT.java @@ -91,16 +91,16 @@ protected boolean forbidPrivateIndexSettings() { } public void testManyToManyGeoPoints() throws ExecutionException, InterruptedException, IOException { - /** - * | q | d1 | d2 - * | | | - * | | | - * | | | - * |2 o| x | x - * | | | - * |1 o| x | x - * |___________________________ - * 1 2 3 4 5 6 7 + /* + | q | d1 | d2 + | | | + | | | + | | | + |2 o| x | x + | | | + |1 o| x | x + |___________________________ + 1 2 3 4 5 6 7 */ Version version = randomBoolean() ? Version.CURRENT : VersionUtils.randomIndexCompatibleVersion(random()); Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); @@ -187,11 +187,10 @@ public void testManyToManyGeoPoints() throws ExecutionException, InterruptedExce } public void testSingeToManyAvgMedian() throws ExecutionException, InterruptedException, IOException { - /** - * q = (0, 0) - * - * d1 = (0, 1), (0, 4), (0, 10); so avg. distance is 5, median distance is 4 - * d2 = (0, 1), (0, 5), (0, 6); so avg. distance is 4, median distance is 5 + /* + q = (0, 0) + d1 = (0, 1), (0, 4), (0, 10); so avg. distance is 5, median distance is 4 + d2 = (0, 1), (0, 5), (0, 6); so avg. distance is 4, median distance is 5 */ Version version = randomBoolean() ? 
Version.CURRENT : VersionUtils.randomIndexCompatibleVersion(random()); Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); @@ -256,16 +255,17 @@ protected void createShuffeldJSONArray(XContentBuilder builder, GeoPoint[] point } public void testManyToManyGeoPointsWithDifferentFormats() throws ExecutionException, InterruptedException, IOException { - /** q d1 d2 - * |4 o| x | x - * | | | - * |3 o| x | x - * | | | - * |2 o| x | x - * | | | - * |1 o|x |x - * |______________________ - * 1 2 3 4 5 6 + /* + q d1 d2 + |4 o| x | x + | | | + |3 o| x | x + | | | + |2 o| x | x + | | | + |1 o|x |x + |______________________ + 1 2 3 4 5 6 */ Version version = randomBoolean() ? Version.CURRENT : VersionUtils.randomIndexCompatibleVersion(random()); Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SharedClusterSnapshotRestoreIT.java index 5268d5ddf35af..91b0aa6438753 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -1884,7 +1884,7 @@ public void testCannotCreateSnapshotsWithSameName() throws Exception { * This test ensures that when a shard is removed from a node (perhaps due to the node * leaving the cluster, then returning), all snapshotting of that shard is aborted, so * all Store references held onto by the snapshot are released. - * + *
<p>
          * See https://github.com/elastic/elasticsearch/issues/20876 */ public void testSnapshotCanceledOnRemovedShard() throws Exception { diff --git a/server/src/internalClusterTest/java/org/opensearch/versioning/ConcurrentSeqNoVersioningIT.java b/server/src/internalClusterTest/java/org/opensearch/versioning/ConcurrentSeqNoVersioningIT.java index c651689e21d3d..7f016caf22149 100644 --- a/server/src/internalClusterTest/java/org/opensearch/versioning/ConcurrentSeqNoVersioningIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/versioning/ConcurrentSeqNoVersioningIT.java @@ -93,7 +93,7 @@ * provided the primaryTerm and seqNo still matches. The reason we cannot assume it will not take place after receiving the failure * is that a request can fork into two because of retries on disconnect, and now race against itself. The retry might complete (and do a * dirty or stale read) before the forked off request gets to execute, and that one might still subsequently succeed. - * + *
<p>
          * Such writes are not necessarily fully replicated and can be lost. There is no * guarantee that the previous value did not have the specified primaryTerm and seqNo

        • CAS writes with other exceptions might or might not have taken place. If they have taken place, then after invocation but not diff --git a/server/src/main/java/org/apache/lucene/index/OneMergeHelper.java b/server/src/main/java/org/apache/lucene/index/OneMergeHelper.java index 8f67bb87b5c42..0ada02a09d157 100644 --- a/server/src/main/java/org/apache/lucene/index/OneMergeHelper.java +++ b/server/src/main/java/org/apache/lucene/index/OneMergeHelper.java @@ -59,8 +59,8 @@ public static double getMbPerSec(Thread thread, MergePolicy.OneMerge merge) { * Returns total bytes written by this merge. **/ public static long getTotalBytesWritten(Thread thread, MergePolicy.OneMerge merge) throws IOException { - /** - * TODO: The number of bytes written during the merge should be accessible in OneMerge. + /* + TODO: The number of bytes written during the merge should be accessible in OneMerge. */ if (thread instanceof ConcurrentMergeScheduler.MergeThread) { return ((ConcurrentMergeScheduler.MergeThread) thread).rateLimiter.getTotalBytesWritten(); diff --git a/server/src/main/java/org/apache/lucene/queryparser/classic/XQueryParser.java b/server/src/main/java/org/apache/lucene/queryparser/classic/XQueryParser.java index 9a8c295d60ec7..2f00ea69207a7 100644 --- a/server/src/main/java/org/apache/lucene/queryparser/classic/XQueryParser.java +++ b/server/src/main/java/org/apache/lucene/queryparser/classic/XQueryParser.java @@ -38,7 +38,7 @@ /** * This class is just a workaround to make {@link QueryParser#handleBareFuzzy(String, Token, String)} accessible by sub-classes. * It is needed for {@link QueryParser}s that need to override the parsing of the slop in a fuzzy query (e.g. word~2, word~). - * + *
<p>
          * TODO: We should maybe rewrite this with the flexible query parser which matches the same syntax with more freedom. */ public class XQueryParser extends QueryParser { diff --git a/server/src/main/java/org/apache/lucene/search/grouping/CollapsingTopDocsCollector.java b/server/src/main/java/org/apache/lucene/search/grouping/CollapsingTopDocsCollector.java index e4c299ba572b1..9ca0491bc29f5 100644 --- a/server/src/main/java/org/apache/lucene/search/grouping/CollapsingTopDocsCollector.java +++ b/server/src/main/java/org/apache/lucene/search/grouping/CollapsingTopDocsCollector.java @@ -50,7 +50,7 @@ * A collector that groups documents based on field values and returns {@link CollapseTopFieldDocs} * output. The collapsing is done in a single pass by selecting only the top sorted document per collapse key. * The value used for the collapse key of each group can be found in {@link CollapseTopFieldDocs#collapseValues}. - * + *
<p>
          * TODO: If the sort is based on score we should propagate the mininum competitive score when orderedGroups is full. * This is safe for collapsing since the group sort is the same as the query sort. */ diff --git a/server/src/main/java/org/apache/lucene/search/uhighlight/BoundedBreakIteratorScanner.java b/server/src/main/java/org/apache/lucene/search/uhighlight/BoundedBreakIteratorScanner.java index 6fde39b16a59a..4edcdea42b53b 100644 --- a/server/src/main/java/org/apache/lucene/search/uhighlight/BoundedBreakIteratorScanner.java +++ b/server/src/main/java/org/apache/lucene/search/uhighlight/BoundedBreakIteratorScanner.java @@ -43,13 +43,13 @@ * If the {@link BreakIterator} cannot find a passage smaller than the maximum length, * a secondary break iterator is used to re-split the passage at the first boundary after * maximum length. - * + *
<p>
          * This is useful to split passages created by {@link BreakIterator}s like `sentence` that * can create big outliers on semi-structured text. - * + *
<p>
          * * WARNING: This break iterator is designed to work with the {@link UnifiedHighlighter}. - * + *
<p>
          * TODO: We should be able to create passages incrementally, starting from the offset of the first match and expanding or not * depending on the offsets of subsequent matches. This is currently impossible because {@link FieldHighlighter} uses * only the first matching offset to derive the start and end of each passage. diff --git a/server/src/main/java/org/apache/lucene/util/packed/XPacked64.java b/server/src/main/java/org/apache/lucene/util/packed/XPacked64.java index 4777b77cfbfed..9e9f6d1fd817d 100644 --- a/server/src/main/java/org/apache/lucene/util/packed/XPacked64.java +++ b/server/src/main/java/org/apache/lucene/util/packed/XPacked64.java @@ -38,8 +38,8 @@ /** * Forked from Lucene 8.x; removed in Lucene 9.0 * - * @todo further investigate a better alternative - * + * TODO: further investigate a better alternative + *
<p>
          * Space optimized random access capable array of values with a fixed number of bits/value. Values * are packed contiguously. * diff --git a/server/src/main/java/org/apache/lucene/util/packed/XPacked64SingleBlock.java b/server/src/main/java/org/apache/lucene/util/packed/XPacked64SingleBlock.java index 0324522e9a68d..53cf4ed8e2273 100644 --- a/server/src/main/java/org/apache/lucene/util/packed/XPacked64SingleBlock.java +++ b/server/src/main/java/org/apache/lucene/util/packed/XPacked64SingleBlock.java @@ -25,8 +25,8 @@ /** * Forked from Lucene 8.x; removed in Lucene 9.0 * - * @todo further investigate a better alternative - * + * TODO: further investigate a better alternative + *
<p>
          * This class is similar to {@link Packed64} except that it trades space for speed by ensuring that * a single block needs to be read/written in order to read/write a value. */ diff --git a/server/src/main/java/org/apache/lucene/util/packed/XPackedInts.java b/server/src/main/java/org/apache/lucene/util/packed/XPackedInts.java index f94a4531a7db9..4260d34ead7c9 100644 --- a/server/src/main/java/org/apache/lucene/util/packed/XPackedInts.java +++ b/server/src/main/java/org/apache/lucene/util/packed/XPackedInts.java @@ -35,9 +35,9 @@ /** * Forked from Lucene 8.x; removed in Lucene 8.9 - * + *
<p>
          * Todo: further investigate a better alternative - * + *
<p>
          * Simplistic compression for array of unsigned long values. Each value is {@code >= 0} and {@code * <=} a specified maximum value. The values are stored as packed ints, with each value consuming a * fixed number of bits. diff --git a/server/src/main/java/org/opensearch/action/AliasesRequest.java b/server/src/main/java/org/opensearch/action/AliasesRequest.java index 4c5d5628b1aac..3632ba2d7304f 100644 --- a/server/src/main/java/org/opensearch/action/AliasesRequest.java +++ b/server/src/main/java/org/opensearch/action/AliasesRequest.java @@ -54,7 +54,7 @@ public interface AliasesRequest extends IndicesRequest.Replaceable { /** * Replaces current aliases with the provided aliases. - * + *
<p>
          * Sometimes aliases expressions need to be resolved to concrete aliases prior to executing the transport action. */ void replaceAliases(String... aliases); diff --git a/server/src/main/java/org/opensearch/action/DocWriteRequest.java b/server/src/main/java/org/opensearch/action/DocWriteRequest.java index df6414470ab6b..31f61e76c74ff 100644 --- a/server/src/main/java/org/opensearch/action/DocWriteRequest.java +++ b/server/src/main/java/org/opensearch/action/DocWriteRequest.java @@ -149,7 +149,7 @@ public interface DocWriteRequest extends IndicesRequest, Accountable { /** * If set, only perform this request if the document was last modification was assigned this primary term. - * + *
<p>
          * If the document last modification was assigned a different term a * {@link org.opensearch.index.engine.VersionConflictEngineException} will be thrown. */ diff --git a/server/src/main/java/org/opensearch/action/DocWriteResponse.java b/server/src/main/java/org/opensearch/action/DocWriteResponse.java index afdb1d3a0bdd9..e3052b3b80035 100644 --- a/server/src/main/java/org/opensearch/action/DocWriteResponse.java +++ b/server/src/main/java/org/opensearch/action/DocWriteResponse.java @@ -341,7 +341,7 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) t /** * Parse the output of the {@link #innerToXContent(XContentBuilder, Params)} method. - * + *
<p>
          * This method is intended to be called by subclasses and must be called multiple times to parse all the information concerning * {@link DocWriteResponse} objects. It always parses the current token, updates the given parsing context accordingly * if needed and then immediately returns. diff --git a/server/src/main/java/org/opensearch/action/TaskOperationFailure.java b/server/src/main/java/org/opensearch/action/TaskOperationFailure.java index 0930bd2741810..5948dd3e2b7cb 100644 --- a/server/src/main/java/org/opensearch/action/TaskOperationFailure.java +++ b/server/src/main/java/org/opensearch/action/TaskOperationFailure.java @@ -50,7 +50,7 @@ /** * Information about task operation failures - * + *
<p>
          * The class is final due to serialization limitations * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java index 625aa91e6ea7f..3dec781f0acf4 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java @@ -95,7 +95,7 @@ public ClusterAllocationExplainRequest(StreamInput in) throws IOException { * Create a new allocation explain request. If {@code primary} is false, the first unassigned replica * will be picked for explanation. If no replicas are unassigned, the first assigned replica will * be explained. - * + *
<p>
          * Package private for testing. */ ClusterAllocationExplainRequest(String index, int shard, boolean primary, @Nullable String currentNode) { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java index 8d82827f4ee50..e62c83490d810 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java @@ -67,7 +67,7 @@ /** * ActionType to get a single task. If the task isn't running then it'll try to request the status from request index. - * + *
<p>
* The general flow is:
          • If this isn't being executed on the node to which the requested TaskId belongs then move to that node. diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java index f26ccbc569cf0..774bffa10da4f 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java @@ -67,7 +67,7 @@ /** * Repository cleanup action for repository implementations based on {@link BlobStoreRepository}. - * + *
<p>
* The steps taken by the repository cleanup operation are as follows:
            1. Check that there are no running repository cleanup, snapshot create, or snapshot delete actions diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesRequest.java index ac975e917e056..6fb086f65497e 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesRequest.java @@ -88,7 +88,7 @@ public ActionRequestValidationException validate() { /** * The names of the repositories. * - * @return list of repositories + * @return array of repository names */ public String[] repositories() { return this.repositories; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java index 9f8fc6067f405..0cfc24994768a 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java @@ -261,7 +261,7 @@ public CreateSnapshotRequest indices(List indices) { /** * Returns a list of indices that should be included into the snapshot * - * @return list of indices + * @return array of index names */ @Override public String[] indices() { @@ -316,7 +316,7 @@ public CreateSnapshotRequest partial(boolean partial) { /** * If set to true the operation should wait for the snapshot completion before returning. - * + *
<p>
              * By default, the operation will return as soon as snapshot is initialized. It can be changed by setting this * flag to true. * diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java index 256850da0d503..c688b13ed9fdd 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java @@ -497,7 +497,7 @@ public Settings indexSettings() { * this is the snapshot that this request restores. If the client can only identify a snapshot by its name then there is a risk that the * desired snapshot may be deleted and replaced by a new snapshot with the same name which is inconsistent with the original one. This * method lets us fail the restore if the precise snapshot we want is not available. - * + *
<p>
              * This is for internal use only and is not exposed in the REST layer. */ public RestoreSnapshotRequest snapshotUuid(String snapshotUuid) { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/get/BaseAliasesRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/get/BaseAliasesRequestBuilder.java index 7cda041b7d9cf..814a65e2a5bf0 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/get/BaseAliasesRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/get/BaseAliasesRequestBuilder.java @@ -81,7 +81,7 @@ public Builder addIndices(String... indices) { /** * Specifies what type of requested indices to ignore and wildcard indices expressions. - * + *
<p>
              * For example indices that don't exist. */ @SuppressWarnings("unchecked") diff --git a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java index 7cda0c1948d24..d048a3008f588 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java @@ -243,7 +243,7 @@ public CreateIndexRequest settings(Map source) { /** * Set the mapping for this index - * + *
<p>
* The mapping should be in the form of a JSON string, with an outer _doc key
* <pre>
*     .mapping("{\"_doc\":{\"properties\": ... }}")
* </pre>
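A minimal sketch of the usage this javadoc describes, assuming an OpenSearch 2.x server-module context; the index name and the "title" property are illustrative and not part of the patch:

import org.opensearch.action.admin.indices.create.CreateIndexRequest;
import org.opensearch.common.xcontent.XContentType;

class CreateIndexMappingSketch {
    static CreateIndexRequest buildRequest() {
        // The mapping is a JSON string whose outer key is "_doc", matching the
        // form shown in the javadoc above; the property definitions are hypothetical.
        return new CreateIndexRequest("my-index").mapping(
            "{\"_doc\":{\"properties\":{\"title\":{\"type\":\"text\"}}}}",
            XContentType.JSON
        );
    }
}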
              @@ -269,7 +269,7 @@ public CreateIndexRequest mapping(String source, XContentType xContentType) {
               
                   /**
                    * Adds mapping that will be added when the index gets created.
              -     *
+     * <p>
              * Note that the definition should *not* be nested under a type name. * * @param source The mapping source @@ -296,7 +296,7 @@ private CreateIndexRequest mapping(BytesReference source, XContentType xContentT /** * Adds mapping that will be added when the index gets created. - * + *
<p>
              * Note that the definition should *not* be nested under a type name. * * @param source The mapping source @@ -432,7 +432,7 @@ public CreateIndexRequest source(String source, XContentType xContentType) { /** * Sets the settings and mappings as a single source. - * + *
<p>
              * Note that the mapping definition should *not* be nested under a type name. */ public CreateIndexRequest source(String source, MediaType mediaType) { @@ -458,7 +458,7 @@ public CreateIndexRequest source(byte[] source, XContentType xContentType) { /** * Sets the settings and mappings as a single source. - * + *
<p>
              * Note that the mapping definition should *not* be nested under a type name. */ public CreateIndexRequest source(byte[] source, MediaType mediaType) { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/ListDanglingIndicesResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/ListDanglingIndicesResponse.java index 0cff01af536dc..384ae2e028bba 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/ListDanglingIndicesResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/ListDanglingIndicesResponse.java @@ -130,7 +130,7 @@ protected void writeNodesTo(StreamOutput out, List * NOTE: visible for testing * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsRequest.java index e3c812aedcfe7..6a1d04efa9714 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsRequest.java @@ -46,7 +46,7 @@ /** * Request the mappings of specific fields - * + *
<p>
              * Note: there is a new class with the same name for the Java HLRC that uses a typeless format. * Any changes done to this class should go to that client class as well. * diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java index d874b5bb6b1ac..94c88e30295a8 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java @@ -61,7 +61,7 @@ /** * Response object for {@link GetFieldMappingsRequest} API - * + *
<p>
              * Note: there is a new class with the same name for the Java HLRC that uses a typeless format. * Any changes done to this class should go to that client class as well. * diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java index 4a2d2fbe8e950..32f751ceb1c5a 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java @@ -210,7 +210,7 @@ public String source() { /** * A specialized simplified mapping source method, takes the form of simple properties definition: * ("field1", "type=string,store=true"). - * + *
<p>
              * Also supports metadata mapping fields such as `_all` and `_parent` as property definition, these metadata * mapping fields will automatically be put on the top level mapping object. */ diff --git a/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsRequest.java index c46940fbfecf9..b82b68f6f9489 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsRequest.java @@ -89,7 +89,7 @@ public void activeOnly(boolean activeOnly) { /** * Contains list of shard id's if shards are passed, empty otherwise. Array is empty by default. * - * @return list of shard id's if shards are passed, empty otherwise + * @return array of shard id's if shards are passed, empty otherwise */ public String[] shards() { return shards; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequest.java index b25bc94a5c8e2..353cdbbbc840c 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequest.java @@ -54,7 +54,7 @@ /** * Request class to swap index under an alias or increment data stream generation upon satisfying conditions - * + *
<p>
              * Note: there is a new class with the same name for the Java HLRC that uses a typeless format. * Any changes done to this class should also go to that client class. * diff --git a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverResponse.java index 55ee65d0a4973..a66fcc9e9bcf2 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverResponse.java @@ -51,7 +51,7 @@ /** * Response object for {@link RolloverRequest} API - * + *
<p>
              * Note: there is a new class with the same name for the Java HLRC that uses a typeless format. * Any changes done to this class should also go to that client class. * diff --git a/server/src/main/java/org/opensearch/action/bulk/BackoffPolicy.java b/server/src/main/java/org/opensearch/action/bulk/BackoffPolicy.java index 0d6d122e31261..25a2c081f8441 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BackoffPolicy.java +++ b/server/src/main/java/org/opensearch/action/bulk/BackoffPolicy.java @@ -40,9 +40,9 @@ /** * Provides a backoff policy for bulk requests. Whenever a bulk request is rejected due to resource constraints (i.e. the client's internal * thread pool is full), the backoff policy decides how long the bulk processor will wait before the operation is retried internally. - * + *
<p>
              * Notes for implementing custom subclasses: - * + *
<p>
              * The underlying mathematical principle of BackoffPolicy are progressions which can be either finite or infinite although * the latter should not be used for retrying. A progression can be mapped to a java.util.Iterator with the following * semantics: @@ -241,7 +241,7 @@ private static class ExponentialEqualJitterBackoffIterator implements Iterator * NOTE: If the value is greater than 30, there can be integer overflow * issues during delay calculation. **/ diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkItemResponse.java b/server/src/main/java/org/opensearch/action/bulk/BulkItemResponse.java index 138f21f1f80e5..20dea872e0ad2 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BulkItemResponse.java +++ b/server/src/main/java/org/opensearch/action/bulk/BulkItemResponse.java @@ -209,7 +209,7 @@ public static class Failure implements Writeable, ToXContentFragment { /** * For write failures before operation was assigned a sequence number. - * + *
<p>
              * use @{link {@link #Failure(String, String, Exception, long, long)}} * to record operation sequence no with failure */ diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkProcessor.java b/server/src/main/java/org/opensearch/action/bulk/BulkProcessor.java index baf64b3e80af6..141ec24fc390f 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BulkProcessor.java +++ b/server/src/main/java/org/opensearch/action/bulk/BulkProcessor.java @@ -185,7 +185,7 @@ public Builder setGlobalPipeline(String globalPipeline) { /** * Sets a custom backoff policy. The backoff policy defines how the bulk processor should handle retries of bulk requests internally * in case they have failed due to resource constraints (i.e. a thread pool was full). - * + *
<p>
              * The default is to back off exponentially. * * @see org.opensearch.action.bulk.BackoffPolicy#exponentialBackoff() diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkRequest.java b/server/src/main/java/org/opensearch/action/bulk/BulkRequest.java index 65043da6c2684..f2f3077001a13 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BulkRequest.java +++ b/server/src/main/java/org/opensearch/action/bulk/BulkRequest.java @@ -67,7 +67,7 @@ /** * A bulk request holds an ordered {@link IndexRequest}s, {@link DeleteRequest}s and {@link UpdateRequest}s * and allows to executes it in a single batch. - * + *
<p>
              * Note that we only support refresh on the bulk request not per item. * @see org.opensearch.client.Client#bulk(BulkRequest) * @@ -123,7 +123,7 @@ public BulkRequest add(DocWriteRequest... requests) { /** * Add a request to the current BulkRequest. - * + *
<p>
              * Note for internal callers: This method does not respect all global parameters. * Only the global index is applied to the request objects. * Global parameters would be respected if the request was serialized for a REST call as it is @@ -347,7 +347,7 @@ public final BulkRequest timeout(TimeValue timeout) { /** * Note for internal callers (NOT high level rest client), * the global parameter setting is ignored when used with: - * + *
<p>
              * - {@link BulkRequest#add(IndexRequest)} * - {@link BulkRequest#add(UpdateRequest)} * - {@link BulkRequest#add(DocWriteRequest)} @@ -364,7 +364,7 @@ public final BulkRequest pipeline(String globalPipeline) { /** * Note for internal callers (NOT high level rest client), * the global parameter setting is ignored when used with: - * + *
<p>
              - {@link BulkRequest#add(IndexRequest)} - {@link BulkRequest#add(UpdateRequest)} - {@link BulkRequest#add(DocWriteRequest)} @@ -404,7 +404,7 @@ public Boolean requireAlias() { /** * Note for internal callers (NOT high level rest client), * the global parameter setting is ignored when used with: - * + *
<p>
              * - {@link BulkRequest#add(IndexRequest)} * - {@link BulkRequest#add(UpdateRequest)} * - {@link BulkRequest#add(DocWriteRequest)} diff --git a/server/src/main/java/org/opensearch/action/delete/DeleteRequest.java b/server/src/main/java/org/opensearch/action/delete/DeleteRequest.java index 81ae3d78e8ced..9a9e861ad8055 100644 --- a/server/src/main/java/org/opensearch/action/delete/DeleteRequest.java +++ b/server/src/main/java/org/opensearch/action/delete/DeleteRequest.java @@ -205,7 +205,7 @@ public long ifSeqNo() { /** * If set, only perform this delete request if the document was last modification was assigned this primary term. - * + *
<p>
              * If the document last modification was assigned a different term a * {@link org.opensearch.index.engine.VersionConflictEngineException} will be thrown. */ diff --git a/server/src/main/java/org/opensearch/action/delete/TransportDeleteAction.java b/server/src/main/java/org/opensearch/action/delete/TransportDeleteAction.java index 039214459ac21..6cbabfec6d763 100644 --- a/server/src/main/java/org/opensearch/action/delete/TransportDeleteAction.java +++ b/server/src/main/java/org/opensearch/action/delete/TransportDeleteAction.java @@ -40,7 +40,7 @@ /** * Performs the delete operation. - * + *
<p>
              * Deprecated use TransportBulkAction with a single item instead * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/action/index/IndexRequest.java b/server/src/main/java/org/opensearch/action/index/IndexRequest.java index d0b78cfd49d4b..c7926e65cf792 100644 --- a/server/src/main/java/org/opensearch/action/index/IndexRequest.java +++ b/server/src/main/java/org/opensearch/action/index/IndexRequest.java @@ -76,14 +76,14 @@ /** * Index request to index a typed JSON document into a specific index and make it searchable. Best * created using {@link org.opensearch.client.Requests#indexRequest(String)}. - * + *
<p>
              * The index requires the {@link #index()}, {@link #id(String)} and * {@link #source(byte[], MediaType)} to be set. - * + *
<p>
              * The source (content to index) can be set in its bytes form using ({@link #source(byte[], MediaType)}), * its string form ({@link #source(String, MediaType)}) or using a {@link XContentBuilder} * ({@link #source(XContentBuilder)}). - * + *
<p>
              * If the {@link #id(String)} is not set, it will be automatically generated. * * @see IndexResponse @@ -388,7 +388,7 @@ public IndexRequest source(Map source, MediaType contentType) throws /** * Sets the document source to index. - * + *
<p>
              * Note, its preferable to either set it using {@link #source(XContentBuilder)} * or using the {@link #source(byte[], MediaType)}. */ @@ -591,7 +591,7 @@ public long ifSeqNo() { /** * If set, only perform this indexing request if the document was last modification was assigned this primary term. - * + *
<p>
              * If the document last modification was assigned a different term a * {@link org.opensearch.index.engine.VersionConflictEngineException} will be thrown. */ diff --git a/server/src/main/java/org/opensearch/action/index/TransportIndexAction.java b/server/src/main/java/org/opensearch/action/index/TransportIndexAction.java index fe4f80bf0c065..ce32840f6751b 100644 --- a/server/src/main/java/org/opensearch/action/index/TransportIndexAction.java +++ b/server/src/main/java/org/opensearch/action/index/TransportIndexAction.java @@ -40,7 +40,7 @@ /** * Performs the index operation. - * + *
<p>
* Allows for the following settings:
              • autoCreateIndex: When set to {@code true}, will automatically create an index if one does not exists. diff --git a/server/src/main/java/org/opensearch/action/ingest/IngestActionForwarder.java b/server/src/main/java/org/opensearch/action/ingest/IngestActionForwarder.java index 9927affbc7442..2821f4fd7fadb 100644 --- a/server/src/main/java/org/opensearch/action/ingest/IngestActionForwarder.java +++ b/server/src/main/java/org/opensearch/action/ingest/IngestActionForwarder.java @@ -46,7 +46,7 @@ /** * A utility for forwarding ingest requests to ingest nodes in a round-robin fashion. - * + *
<p>
                * TODO: move this into IngestService and make index/bulk actions call that * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/action/search/CreatePitController.java b/server/src/main/java/org/opensearch/action/search/CreatePitController.java index 1d11ad2760675..87eb27bdb8255 100644 --- a/server/src/main/java/org/opensearch/action/search/CreatePitController.java +++ b/server/src/main/java/org/opensearch/action/search/CreatePitController.java @@ -98,18 +98,18 @@ public void executeCreatePit( task.getParentTaskId(), Collections.emptyMap() ); - /** - * This is needed for cross cluster functionality to work with PITs and current ccsMinimizeRoundTrips is - * not supported for point in time + /* + This is needed for cross cluster functionality to work with PITs and current ccsMinimizeRoundTrips is + not supported for point in time */ searchRequest.setCcsMinimizeRoundtrips(false); - /** - * Phase 1 of create PIT + /* + Phase 1 of create PIT */ executeCreatePit(searchTask, searchRequest, createPitListener); - /** - * Phase 2 of create PIT where we update pit id in pit contexts + /* + Phase 2 of create PIT where we update pit id in pit contexts */ createPitListener.whenComplete( searchResponse -> { executeUpdatePitId(request, searchRequest, searchResponse, updatePitIdListener); }, @@ -167,9 +167,9 @@ void executeUpdatePitId( searchResponse.pointInTimeId() ) ); - /** - * store the create time ( same create time for all PIT contexts across shards ) to be used - * for list PIT api + /* + store the create time ( same create time for all PIT contexts across shards ) to be used + for list PIT api */ final long relativeStartNanos = System.nanoTime(); final TransportSearchAction.SearchTimeProvider timeProvider = new TransportSearchAction.SearchTimeProvider( diff --git a/server/src/main/java/org/opensearch/action/search/SearchPhaseController.java b/server/src/main/java/org/opensearch/action/search/SearchPhaseController.java index cca85f92d2676..161a103cdf36a 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchPhaseController.java +++ b/server/src/main/java/org/opensearch/action/search/SearchPhaseController.java @@ -162,7 +162,7 @@ public AggregatedDfs aggregateDfs(Collection results) { * Returns a score doc array of top N search docs across all shards, followed by top suggest docs for each * named completion suggestion across all shards. If more than one named completion suggestion is specified in the * request, the suggest docs for a named suggestion are ordered by the suggestion name. - * + *
<p>
                * Note: The order of the sorted score docs depends on the shard index in the result array if the merge process needs to disambiguate * the result. In oder to obtain stable results the shard index (index of the result in the result array) must be the same. * @@ -284,7 +284,7 @@ public List[] fillDocIdsToLoad(int numShards, ScoreDoc[] shardDocs) { /** * Enriches search hits and completion suggestion hits from sortedDocs using fetchResultsArr, * merges suggestions, aggregations and profile results - * + *
<p>
      * Expects sortedDocs to have top search docs across all shards, optionally followed by top suggest docs for each named
      * completion suggestion ordered by suggestion name
      */
diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequest.java b/server/src/main/java/org/opensearch/action/search/SearchRequest.java
index 762022460ebdb..21cf0ed97b9da 100644
--- a/server/src/main/java/org/opensearch/action/search/SearchRequest.java
+++ b/server/src/main/java/org/opensearch/action/search/SearchRequest.java
@@ -600,7 +600,7 @@ public void setMaxConcurrentShardRequests(int maxConcurrentShardRequests) {
      * the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for
      * instance a shard can not match any documents based on its rewrite method ie. if date filters are mandatory to match but the shard
      * bounds and the query are disjoint.
-     *
+     * <p>
      * When unspecified, the pre-filter phase is executed if any of these conditions is met:
      * <ul>
      * <li>The request targets more than 128 shards
@@ -621,7 +621,7 @@ public void setPreFilterShardSize(int preFilterShardSize) {
      * This filter roundtrip can limit the number of shards significantly if for
      * instance a shard can not match any documents based on its rewrite method ie. if date filters are mandatory to match but the shard
      * bounds and the query are disjoint.
-     *
+     * <p>
      * When unspecified, the pre-filter phase is executed if any of these conditions is met:
      * <ul>
      * <li>The request targets more than 128 shards
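[Illustrative aside, not part of this patch: the pre-filter threshold described in the javadoc above is settable per request. A minimal sketch, assuming a transport Client handle and a hypothetical index pattern.]

    import org.opensearch.action.search.SearchResponse;
    import org.opensearch.client.Client;
    import org.opensearch.index.query.QueryBuilders;

    public final class PreFilterExample {
        // Sketch: force the pre-filter round-trip whenever more than 64 shards are
        // targeted, instead of the 128-shard default described above.
        public static SearchResponse searchWithPreFilter(Client client) {
            return client.prepareSearch("logs-*")
                .setQuery(QueryBuilders.rangeQuery("@timestamp").gte("now-1d"))
                .setPreFilterShardSize(64)   // a threshold, not a cap on shard count
                .get();
        }
    }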
diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestBuilder.java b/server/src/main/java/org/opensearch/action/search/SearchRequestBuilder.java
index 861e1df0203d7..bc43b65e5d844 100644
--- a/server/src/main/java/org/opensearch/action/search/SearchRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/search/SearchRequestBuilder.java
@@ -605,7 +605,7 @@ public SearchRequestBuilder setMaxConcurrentShardRequests(int maxConcurrentShard
      * the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for
      * instance a shard can not match any documents based on its rewrite method ie. if date filters are mandatory to match but the shard
      * bounds and the query are disjoint.
-     *
+     * <p>
      * When unspecified, the pre-filter phase is executed if any of these conditions is met:
      * <ul>
      * <li>The request targets more than 128 shards
diff --git a/server/src/main/java/org/opensearch/action/search/SearchResponseSections.java b/server/src/main/java/org/opensearch/action/search/SearchResponseSections.java
index 2e447abd125c5..579a01f0dd932 100644
--- a/server/src/main/java/org/opensearch/action/search/SearchResponseSections.java
+++ b/server/src/main/java/org/opensearch/action/search/SearchResponseSections.java
@@ -53,7 +53,7 @@
 /**
  * Base class that holds the various sections which a search response is
  * composed of (hits, aggs, suggestions etc.) and allows to retrieve them.
- *
+ * <p>
  * The reason why this class exists is that the high level REST client uses its own classes
  * to parse aggregations into, which are not serializable. This is the common part that can be
  * shared between core and client.
diff --git a/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java b/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java
index 614d576324026..b15a4b66e8870 100644
--- a/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java
+++ b/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java
@@ -76,7 +76,7 @@ private void deletePits(ActionListener<DeletePitResponse> listener, DeletePitReq
     /**
      * Delete all active PIT reader contexts leveraging list all PITs
-     *
+     * <p>
      * For Cross cluster PITs :
      * - mixed cluster PITs ( PIT comprising local and remote ) will be fully deleted. Since there will at least be
      * one reader context with PIT ID present in local cluster, 'Get all PITs' will retrieve the PIT ID with which
diff --git a/server/src/main/java/org/opensearch/action/support/TransportAction.java b/server/src/main/java/org/opensearch/action/support/TransportAction.java
index daa11c2d7d80f..72aae210d61ae 100644
--- a/server/src/main/java/org/opensearch/action/support/TransportAction.java
+++ b/server/src/main/java/org/opensearch/action/support/TransportAction.java
@@ -81,7 +81,7 @@ private Releasable registerChildNode(TaskId parentTask) {
     /**
      * Use this method when the transport action call should result in creation of a new task associated with the call.
-     *
+     * <p>
      * This is a typical behavior.
      */
    public final Task execute(Request request, ActionListener<Response> listener) {
diff --git a/server/src/main/java/org/opensearch/action/support/nodes/BaseNodesRequest.java b/server/src/main/java/org/opensearch/action/support/nodes/BaseNodesRequest.java
index f5fb41dc5bae3..4d54ce51c923c 100644
--- a/server/src/main/java/org/opensearch/action/support/nodes/BaseNodesRequest.java
+++ b/server/src/main/java/org/opensearch/action/support/nodes/BaseNodesRequest.java
@@ -53,9 +53,9 @@ public abstract class BaseNodesRequest<Request extends BaseNodesRequest<Request>
      * the list of nodesIds that will be used to resolve this request and {@link #concreteNodes}
      * will be populated. Note that if {@link #concreteNodes} is not null, it will be used and nodeIds
      * will be ignored.
-     *
+     * <p>
      * See {@link DiscoveryNodes#resolveNodes} for a full description of the options.
-     *
+     * <p>
      * TODO: get rid of this and resolve it to concrete nodes in the rest layer
      **/
    private String[] nodesIds;
diff --git a/server/src/main/java/org/opensearch/action/support/replication/ReplicationOperation.java b/server/src/main/java/org/opensearch/action/support/replication/ReplicationOperation.java
index 60c490a50575a..9f69d41d83f5b 100644
--- a/server/src/main/java/org/opensearch/action/support/replication/ReplicationOperation.java
+++ b/server/src/main/java/org/opensearch/action/support/replication/ReplicationOperation.java
@@ -479,7 +479,7 @@ public interface Primary<
     /**
      * Notifies the primary of a local checkpoint for the given allocation.
-     *
+     * <p>
      * Note: The primary will use this information to advance the global checkpoint if possible.
      *
      * @param allocationId allocation ID of the shard corresponding to the supplied local checkpoint
diff --git a/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java
index de5a92fdcc4b1..b68bd13cfed80 100644
--- a/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java
+++ b/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java
@@ -100,7 +100,7 @@
 /**
  * Base class for requests that should be executed on a primary copy followed by replica copies.
  * Subclasses can resolve the target shard and provide implementation for primary and replica operations.
- *
+ * <p>
  * The action samples cluster state on the receiving node to reroute to node with primary copy and on the
  * primary node to validate request before primary operation followed by sampling state again for resolving
  * nodes with replica copies to perform replication.
@@ -866,7 +866,7 @@ protected IndexShard getIndexShard(final ShardId shardId) {
      * Responsible for routing and retrying failed operations on the primary.
      * The actual primary operation is done in {@link ReplicationOperation} on the
      * node with primary copy.
-     *
+     * <p>
      * Resolves index and shard id for the request before routing it to target node
      *
      * @opensearch.internal
diff --git a/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java b/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java
index 62cbfbde9780a..a0b5299805868 100644
--- a/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java
+++ b/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java
@@ -266,7 +266,7 @@ protected abstract void dispatchedShardOperationOnReplica(
     /**
      * Result of taking the action on the primary.
-     *
+     * <p>
      * NOTE: public for testing
      *
      * @opensearch.internal
@@ -496,7 +496,7 @@ void run() {
      * A proxy for write operations that need to be performed on the
      * replicas, where a failure to execute the operation should fail
      * the replica shard and/or mark the replica as stale.
-     *
+     * <p>
      * This extends {@code TransportReplicationAction.ReplicasProxy} to do the
      * failing and stale-ing.
      *
diff --git a/server/src/main/java/org/opensearch/action/support/single/shard/SingleShardRequest.java b/server/src/main/java/org/opensearch/action/support/single/shard/SingleShardRequest.java
index c474096ff94e4..56b34aea8248d 100644
--- a/server/src/main/java/org/opensearch/action/support/single/shard/SingleShardRequest.java
+++ b/server/src/main/java/org/opensearch/action/support/single/shard/SingleShardRequest.java
@@ -55,7 +55,7 @@ public abstract class SingleShardRequest<Request extends SingleShardRequest<Request>
    /**
     * The concrete index this request is targeted for or null if index is optional.
-    *
+    * <p>
     * Whether index property is optional depends on the concrete implementation. If index property is required the
     * concrete implementation should use {@link #validateNonNullIndex()} to check if the index property has been set
     */
diff --git a/server/src/main/java/org/opensearch/action/support/tasks/BaseTasksRequest.java b/server/src/main/java/org/opensearch/action/support/tasks/BaseTasksRequest.java
index a10f74f7c2aa8..c2ae333b17055 100644
--- a/server/src/main/java/org/opensearch/action/support/tasks/BaseTasksRequest.java
+++ b/server/src/main/java/org/opensearch/action/support/tasks/BaseTasksRequest.java
@@ -128,7 +128,7 @@ public final Request setNodes(String... nodes) {
     /**
      * Returns the id of the task that should be processed.
-     *
+     * <p>
      * By default tasks with any ids are returned.
      */
    public TaskId getTaskId() {
diff --git a/server/src/main/java/org/opensearch/action/update/UpdateRequest.java b/server/src/main/java/org/opensearch/action/update/UpdateRequest.java
index b354482871521..946c3c2446173 100644
--- a/server/src/main/java/org/opensearch/action/update/UpdateRequest.java
+++ b/server/src/main/java/org/opensearch/action/update/UpdateRequest.java
@@ -589,7 +589,7 @@ public long ifSeqNo() {
     /**
      * If set, only perform this update request if the document's last modification was assigned this primary term.
-     *
+     * <p>
      * If the document last modification was assigned a different term a
      * {@link org.opensearch.index.engine.VersionConflictEngineException} will be thrown.
      */
diff --git a/server/src/main/java/org/opensearch/bootstrap/BootstrapChecks.java b/server/src/main/java/org/opensearch/bootstrap/BootstrapChecks.java
index f9661e71d60e6..e43c42446de2c 100644
--- a/server/src/main/java/org/opensearch/bootstrap/BootstrapChecks.java
+++ b/server/src/main/java/org/opensearch/bootstrap/BootstrapChecks.java
@@ -73,7 +73,7 @@
 /**
  * We enforce bootstrap checks once a node has the transport protocol bound to a non-loopback interface or if the system property {@code
- * opensearch.enforce.bootstrap.checks} is set to {@true}. In this case we assume the node is running in production and
+ * opensearch.enforce.bootstrap.checks} is set to {@code true}. In this case we assume the node is running in production and
  * all bootstrap checks must pass.
 *
 * @opensearch.internal
diff --git a/server/src/main/java/org/opensearch/bootstrap/JNAKernel32Library.java b/server/src/main/java/org/opensearch/bootstrap/JNAKernel32Library.java
index 8e556df4b2f9b..91da34fb7216d 100644
--- a/server/src/main/java/org/opensearch/bootstrap/JNAKernel32Library.java
+++ b/server/src/main/java/org/opensearch/bootstrap/JNAKernel32Library.java
@@ -141,7 +141,7 @@ public boolean callback(long dwCtrlType) {
     /**
      * Memory protection constraints
-     *
+     * <p>
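[Illustrative aside, not part of this patch: the seq-no/primary-term precondition documented in the UpdateRequest hunk above can be sketched as follows. The index name, document id, and the exact XContentType import path are assumptions.]

    import org.opensearch.action.update.UpdateRequest;
    import org.opensearch.common.xcontent.XContentType;

    public final class ConditionalUpdateExample {
        // Sketch: the update applies only if the document's last modification carried
        // exactly this seq_no/primary_term pair; otherwise the engine throws a
        // VersionConflictEngineException, as the javadoc above states.
        public static UpdateRequest conditionalUpdate(long seqNo, long primaryTerm) {
            return new UpdateRequest("my-index", "doc-1")
                .setIfSeqNo(seqNo)
                .setIfPrimaryTerm(primaryTerm)
                .doc("{\"status\":\"updated\"}", XContentType.JSON);
        }
    }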
      * https://msdn.microsoft.com/en-us/library/windows/desktop/aa366786%28v=vs.85%29.aspx
      */
     public static final int PAGE_NOACCESS = 0x0001;
@@ -151,7 +151,7 @@ public boolean callback(long dwCtrlType) {
     /**
      * Contains information about a range of pages in the virtual address space of a process.
      * The VirtualQuery and VirtualQueryEx functions use this structure.
-     *
+     * <p>
      * https://msdn.microsoft.com/en-us/library/windows/desktop/aa366775%28v=vs.85%29.aspx
      */
     public static class MemoryBasicInformation extends Structure {
@@ -186,7 +186,7 @@ public SizeT() {
     /**
      * Locks the specified region of the process's virtual address space into physical
      * memory, ensuring that subsequent access to the region will not incur a page fault.
-     *
+     * <p>
      * https://msdn.microsoft.com/en-us/library/windows/desktop/aa366895%28v=vs.85%29.aspx
      *
      * @param address A pointer to the base address of the region of pages to be locked.
@@ -197,7 +197,7 @@ public SizeT() {
     /**
      * Retrieves information about a range of pages within the virtual address space of a specified process.
-     *
+     * <p>
      * https://msdn.microsoft.com/en-us/library/windows/desktop/aa366907%28v=vs.85%29.aspx
      *
      * @param handle A handle to the process whose memory information is queried.
@@ -210,7 +210,7 @@ public SizeT() {
     /**
      * Sets the minimum and maximum working set sizes for the specified process.
-     *
+     * <p>
      * https://msdn.microsoft.com/en-us/library/windows/desktop/ms686234%28v=vs.85%29.aspx
      *
      * @param handle A handle to the process whose working set sizes is to be set.
@@ -222,7 +222,7 @@ public SizeT() {
     /**
      * Retrieves a pseudo handle for the current process.
-     *
+     * <p>
      * https://msdn.microsoft.com/en-us/library/windows/desktop/ms683179%28v=vs.85%29.aspx
      *
      * @return a pseudo handle to the current process.
@@ -231,7 +231,7 @@ public SizeT() {
     /**
      * Closes an open object handle.
-     *
+     * <p>
      * https://msdn.microsoft.com/en-us/library/windows/desktop/ms724211%28v=vs.85%29.aspx
      *
      * @param handle A valid handle to an open object.
@@ -252,7 +252,7 @@ public SizeT() {
     /**
      * Creates or opens a new job object
-     *
+     * <p>
      * https://msdn.microsoft.com/en-us/library/windows/desktop/ms682409%28v=vs.85%29.aspx
      *
      * @param jobAttributes security attributes
@@ -263,7 +263,7 @@ public SizeT() {
     /**
      * Associates a process with an existing job
-     *
+     * <p>
      * https://msdn.microsoft.com/en-us/library/windows/desktop/ms681949%28v=vs.85%29.aspx
      *
      * @param job job handle
@@ -274,7 +274,7 @@ public SizeT() {
     /**
      * Basic limit information for a job object
-     *
+     * <p>
      * https://msdn.microsoft.com/en-us/library/windows/desktop/ms684147%28v=vs.85%29.aspx
      */
     public static class JOBOBJECT_BASIC_LIMIT_INFORMATION extends Structure implements Structure.ByReference {
@@ -316,7 +316,7 @@ protected List<String> getFieldOrder() {
     /**
      * Get job limit and state information
-     *
+     * <p>
      * https://msdn.microsoft.com/en-us/library/windows/desktop/ms684925%28v=vs.85%29.aspx
      *
      * @param job job handle
@@ -330,7 +330,7 @@ protected List<String> getFieldOrder() {
     /**
      * Set job limit and state information
-     *
+     * <p>
      * https://msdn.microsoft.com/en-us/library/windows/desktop/ms686216%28v=vs.85%29.aspx
      *
      * @param job job handle
diff --git a/server/src/main/java/org/opensearch/bootstrap/OpenSearch.java b/server/src/main/java/org/opensearch/bootstrap/OpenSearch.java
index ab52ae5a43a2a..4d36efff0e192 100644
--- a/server/src/main/java/org/opensearch/bootstrap/OpenSearch.java
+++ b/server/src/main/java/org/opensearch/bootstrap/OpenSearch.java
@@ -188,9 +188,9 @@ void init(final boolean daemonize, final Path pidFile, final boolean quiet, Envi
     /**
      * Required method that's called by Apache Commons procrun when
      * running as a service on Windows, when the service is stopped.
-     *
+     * <p>
      * http://commons.apache.org/proper/commons-daemon/procrun.html
-     *
+     * <p>
      * NOTE: If this method is renamed and/or moved, make sure to
      * update opensearch-service.bat!
      */
diff --git a/server/src/main/java/org/opensearch/cluster/AckedClusterStateTaskListener.java b/server/src/main/java/org/opensearch/cluster/AckedClusterStateTaskListener.java
index 482087be1c8eb..28e1e7c53cb9c 100644
--- a/server/src/main/java/org/opensearch/cluster/AckedClusterStateTaskListener.java
+++ b/server/src/main/java/org/opensearch/cluster/AckedClusterStateTaskListener.java
@@ -44,7 +44,7 @@ public interface AckedClusterStateTaskListener extends ClusterStateTaskListener
     /**
      * Called to determine which nodes the acknowledgement is expected from.
-     *
+     * <p>
      * As this method will be called multiple times to determine the set of acking nodes,
      * it is crucial for it to return consistent results: Given the same listener instance
      * and the same node parameter, the method implementation should return the same result.
diff --git a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskExecutor.java b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskExecutor.java
index 50beeb1f03deb..bf8494cc36857 100644
--- a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskExecutor.java
+++ b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskExecutor.java
@@ -70,7 +70,7 @@ default boolean runOnlyOnMaster() {
     /**
      * Callback invoked after new cluster state is published. Note that
      * this method is not invoked if the cluster state was not updated.
-     *
+     * <p>
      * Note that this method will be executed using system context.
      *
      * @param clusterChangedEvent the change event for this cluster state change, containing
@@ -80,7 +80,7 @@ default void clusterStatePublished(ClusterChangedEvent clusterChangedEvent) {}
     /**
      * Builds a concise description of a list of tasks (to be used in logging etc.).
-     *
+     * <p>
      * Note that the tasks given are not necessarily the same as those that will be passed to {@link #execute(ClusterState, List)}.
      * but are guaranteed to be a subset of them. This method can be called multiple times with different lists before execution.
      * This allows grouped task descriptions by the submitting source.
diff --git a/server/src/main/java/org/opensearch/cluster/DiffableUtils.java b/server/src/main/java/org/opensearch/cluster/DiffableUtils.java
index dd2232968114e..a38fc81bebc08 100644
--- a/server/src/main/java/org/opensearch/cluster/DiffableUtils.java
+++ b/server/src/main/java/org/opensearch/cluster/DiffableUtils.java
@@ -182,7 +182,7 @@ public Map<K, T> apply(Map<K, T> map) {
     /**
      * Represents differences between two maps of objects and is used as base class for different map implementations.
-     *
+     * <p>
      * Implements serialization. How differences are applied is left to subclasses.
      *
      * @param <K> the type of map keys
@@ -381,9 +381,9 @@ public Integer readKey(StreamInput in) throws IOException {
     /**
      * Provides read and write operations to serialize map values.
      * Reading of values can be made dependent on map key.
-     *
+     * <p>
      * Also provides operations to distinguish whether map values are diffable.
-     *
+     * <p>
      * Should not be directly implemented, instead implement either
      * {@link DiffableValueSerializer} or {@link NonDiffableValueSerializer}.
      *
@@ -517,7 +517,7 @@ public Diff<T> readDiff(StreamInput in, K key) throws IOException {
     /**
      * Implementation of the ValueSerializer that wraps value and diff readers.
-     *
+     * <p>
      * Note: this implementation is ignoring the key.
      *
      * @opensearch.internal
diff --git a/server/src/main/java/org/opensearch/cluster/InternalClusterInfoService.java b/server/src/main/java/org/opensearch/cluster/InternalClusterInfoService.java
index 35490d2f37a49..e381b8f244bf3 100644
--- a/server/src/main/java/org/opensearch/cluster/InternalClusterInfoService.java
+++ b/server/src/main/java/org/opensearch/cluster/InternalClusterInfoService.java
@@ -82,7 +82,7 @@
  * to 30 seconds). The InternalClusterInfoService only runs on the cluster-manager node.
  * Listens for changes in the number of data nodes and immediately submits a
  * ClusterInfoUpdateJob if a node has been added.
- *
+ * <p>
  * Every time the timer runs, gathers information about the disk usage and
  * shard sizes across the cluster.
  *
diff --git a/server/src/main/java/org/opensearch/cluster/RestoreInProgress.java b/server/src/main/java/org/opensearch/cluster/RestoreInProgress.java
index 042a4743ca25d..3e0c78099e6b4 100644
--- a/server/src/main/java/org/opensearch/cluster/RestoreInProgress.java
+++ b/server/src/main/java/org/opensearch/cluster/RestoreInProgress.java
@@ -189,7 +189,7 @@ public Snapshot snapshot() {
     /**
      * Returns the list of shards that are being restored and their status
      *
-     * @return list of shards
+     * @return map of shard id to shard restore status
      */
     public Map<ShardId, ShardRestoreStatus> shards() {
         return this.shards;
     }
diff --git a/server/src/main/java/org/opensearch/cluster/coordination/ClusterStatePublisher.java b/server/src/main/java/org/opensearch/cluster/coordination/ClusterStatePublisher.java
index 39d05e672f977..3a506397bcac8 100644
--- a/server/src/main/java/org/opensearch/cluster/coordination/ClusterStatePublisher.java
+++ b/server/src/main/java/org/opensearch/cluster/coordination/ClusterStatePublisher.java
@@ -46,11 +46,11 @@ public interface ClusterStatePublisher {
     /**
      * Publish all the changes to the cluster from the cluster-manager (can be called just by the cluster-manager). The publish
      * process should apply this state to the cluster-manager as well!
-     *
+     * <p>
      * The publishListener allows to wait for the publication to complete, which can be either successful completion, timing out or failing.
      * The method is guaranteed to pass back a {@link FailedToCommitClusterStateException} to the publishListener if the change is not
      * committed and should be rejected. Any other exception signals that something bad happened but the change is committed.
-     *
+     * <p>
      * The {@link AckListener} allows to keep track of the ack received from nodes, and verify whether
      * they updated their own cluster state or not.
      */
diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java
index 15eaf9c8bcc1e..f701a2f52277d 100644
--- a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java
+++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java
@@ -483,10 +483,10 @@ public static void ensureNodeCommissioned(DiscoveryNode node, Metadata metadata)
      * 2. The joining node has to be a non-remote store backed if it is joining a non-remote store backed cluster.
      * Validates no remote store attributes are present in joining node as existing nodes in the cluster doesn't have
      * remote store attributes.
-     *
+     * <p>
      * A remote store backed node is the one which holds all the remote store attributes and a remote store backed
      * cluster is the one which has only homogeneous remote store backed nodes with same node attributes
-     *
+     * <p>
      * TODO: When we support moving from remote store cluster to non remote store and vice versa, this logic will
      * need to be modified.
      */
diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Reconfigurator.java b/server/src/main/java/org/opensearch/cluster/coordination/Reconfigurator.java
index 1570a84ab871f..128bd42fd7947 100644
--- a/server/src/main/java/org/opensearch/cluster/coordination/Reconfigurator.java
+++ b/server/src/main/java/org/opensearch/cluster/coordination/Reconfigurator.java
@@ -63,7 +63,7 @@ public class Reconfigurator {
      * and still the cluster would be unavailable. However not reducing the voting configuration size can also hamper resilience: in a
      * five-node cluster we could lose two nodes and by reducing the voting configuration to the remaining three nodes we could tolerate the
      * loss of a further node before failing.
-     *
+     * <p>
      * We offer two options: either we auto-shrink the voting configuration as long as it contains more than three nodes, or we don't and we
      * require the user to control the voting configuration manually using the retirement API. The former, default, option, guarantees that
      * as long as there have been at least three cluster-manager-eligible nodes in the cluster and no more than one of them is currently unavailable,
diff --git a/server/src/main/java/org/opensearch/cluster/health/ClusterShardHealth.java b/server/src/main/java/org/opensearch/cluster/health/ClusterShardHealth.java
index d06e89d9ea170..cbc63565228f9 100644
--- a/server/src/main/java/org/opensearch/cluster/health/ClusterShardHealth.java
+++ b/server/src/main/java/org/opensearch/cluster/health/ClusterShardHealth.java
@@ -219,13 +219,13 @@ public void writeTo(final StreamOutput out) throws IOException {
     /**
      * Checks if an inactive primary shard should cause the cluster health to go RED.
-     *
+     * <p>
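[Illustrative aside, not part of this patch: the auto-shrink choice described in the Reconfigurator javadoc above is exposed as one dynamic cluster setting. A hedged sketch:]

    import org.opensearch.common.settings.Settings;

    public final class VotingConfigExample {
        // Sketch: disable auto-shrinking so the voting configuration is managed
        // manually via the voting-config exclusions ("retirement") API instead.
        public static Settings manualVotingConfig() {
            return Settings.builder()
                .put("cluster.auto_shrink_voting_configuration", false)
                .build();
        }
    }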
      * An inactive primary shard in an index should cause the cluster health to be RED to make it visible that some of the existing data is
      * unavailable. In case of index creation, snapshot restore or index shrinking, which are unexceptional events in the cluster lifecycle,
      * cluster health should not turn RED for the time where primaries are still in the initializing state but go to YELLOW instead.
      * However, in case of exceptional events, for example when the primary shard cannot be assigned to a node or initialization fails at
      * some point, cluster health should still turn RED.
-     *
+     * <p>
      * NB: this method should *not* be called on active shards nor on non-primary shards.
      */
     public static ClusterHealthStatus getInactivePrimaryHealth(final ShardRouting shardRouting) {
diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexGraveyard.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexGraveyard.java
index 0da948dc78c5d..85a203e5e059a 100644
--- a/server/src/main/java/org/opensearch/cluster/metadata/IndexGraveyard.java
+++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexGraveyard.java
@@ -61,7 +61,7 @@
 /**
  * A collection of tombstones for explicitly marking indices as deleted in the cluster state.
- *
+ * <p>
  * The cluster state contains a list of index tombstones for indices that have been
  * deleted in the cluster. Because cluster states are processed asynchronously by
  * nodes and a node could be removed from the cluster for a period of time, the
@@ -250,7 +250,7 @@ public int getNumPurged() {
     /**
      * Purge tombstone entries. Returns the number of entries that were purged.
-     *
+     * <p>
      * Tombstones are purged if the number of tombstones in the list
      * is greater than the input parameter of maximum allowed tombstones.
      * Tombstones are purged until the list is equal to the maximum allowed.
diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java
index 52df72b342b3e..5d8b751b241e2 100644
--- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java
+++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java
@@ -779,7 +779,7 @@ public long getAliasesVersion() {
     /**
      * The term of the current selected primary. This is a non-negative number incremented when
      * a primary shard is assigned after a full cluster restart or a replica shard is promoted to a primary.
-     *
+     * <p>
      * Note: since we increment the term every time a shard is assigned, the term for any operational shard (i.e., a shard
      * that can be indexed into) is larger than 0. See {@link IndexMetadataUpdater#applyChanges}.
      **/
@@ -1884,7 +1884,7 @@ public static Settings addHumanReadableSettings(Settings settings) {
     /**
      * Return the version the index was created from the provided index settings
-     *
+     * <p>
      * This looks for the presence of the {@link Version} object with key {@link IndexMetadata#SETTING_VERSION_CREATED}
      */
     public static Version indexCreated(final Settings indexSettings) {
diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexTemplateMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexTemplateMetadata.java
index 272bb132197af..c5efb55316b84 100644
--- a/server/src/main/java/org/opensearch/cluster/metadata/IndexTemplateMetadata.java
+++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexTemplateMetadata.java
@@ -369,7 +369,7 @@ public IndexTemplateMetadata build() {
        /**
         * Serializes the template to xContent, using the legacy format where the mappings are
         * nested under the type name.
-        *
+        * <p>
         * This method is used for serializing templates before storing them in the cluster metadata,
         * and also in the REST layer when returning a deprecated typed response.
         */
@@ -386,7 +386,7 @@ public static void toXContentWithTypes(
        /**
         * Serializes the template to xContent, making sure not to nest mappings under the
         * type name.
-        *
+        * <p>
         * Note that this method should currently only be used for creating REST responses,
         * and not when directly updating stored templates. Index templates are still stored
         * in the old, typed format, and have yet to be migrated to be typeless.
diff --git a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java
index 146193b8d22c4..626903877b0c6 100644
--- a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java
+++ b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java
@@ -1666,7 +1666,7 @@ private SortedMap<String, IndexAbstraction> buildIndicesLookup() {
     /**
      * Validates there isn't any index with a name that would clash with the future backing indices of the existing data streams.
-     *
+     * <p>
      * E.g., if data stream `foo` has backing indices [`.ds-foo-000001`, `.ds-foo-000002`] and the indices lookup contains indices
      * `.ds-foo-000001`, `.ds-foo-000002` and `.ds-foo-000006` this will throw an IllegalStateException (as attempting to rollover the
      * `foo` data stream from generation 5 to 6 will not be possible)
diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java
index b2861e566dd4b..8d76a39712ee3 100644
--- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java
+++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java
@@ -754,7 +754,7 @@ private ClusterState applyCreateIndexRequestWithExistingMetadata(
     /**
      * Parses the provided mappings json and the inheritable mappings from the templates (if any)
      * into a map.
-     *
+     * <p>
      * The template mappings are applied in the order they are encountered in the list (clients
      * should make sure the lower index, closer to the head of the list, templates have the highest
      * {@link IndexTemplateMetadata#order()}). This merging makes no distinction between field
@@ -792,7 +792,7 @@ static Map<String, Object> parseV1Mappings(
      * Validates and creates the settings for the new index based on the explicitly configured settings via the
      * {@link CreateIndexClusterStateUpdateRequest}, inherited from templates and, if recovering from another index (ie. split, shrink,
      * clone), the resize settings.
-     *
+     * <p>
      * The template mappings are applied in the order they are encountered in the list (clients should make sure the lower index, closer
      * to the head of the list, templates have the highest {@link IndexTemplateMetadata#order()})
      *
@@ -1009,7 +1009,7 @@ static int getIndexNumberOfRoutingShards(Settings indexSettings, @Nullable Index
     /**
      * Validate and resolve the aliases explicitly set for the index, together with the ones inherited from the specified
      * templates.
-     *
+     * <p>
      * The template mappings are applied in the order they are encountered in the list (clients should make sure the lower index, closer
      * to the head of the list, templates have the highest {@link IndexTemplateMetadata#order()})
      *
diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexStateService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexStateService.java
index 91c996448ea8f..e30e878f1b31a 100644
--- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexStateService.java
+++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexStateService.java
@@ -166,7 +166,7 @@ public MetadataIndexStateService(
     /**
      * Closes one or more indices.
-     *
+     * <p>
      * Closing indices is a 3 steps process: it first adds a write block to every indices to close, then waits for the operations on shards
      * to be terminated and finally closes the indices by moving their state to CLOSE.
      */
@@ -302,7 +302,7 @@ public TimeValue timeout() {
     /**
      * Step 1 - Start closing indices by adding a write block
-     *
+     * <p>
      * This step builds the list of indices to close (the ones explicitly requested that are not in CLOSE state) and adds a unique cluster
      * block (or reuses an existing one) to every index to close in the cluster state. After the cluster state is published, the shards
      * should start to reject writing operations and we can proceed with step 2.
diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java
index 71b86ec853ce4..1093ac09777e7 100644
--- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java
+++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java
@@ -747,7 +747,7 @@ public static Map<String, List<String>> findConflictingV2Templates(
     /**
      * Return a map of v2 template names to their index patterns for v2 templates that would overlap
      * with the given template's index patterns.
-     *
+     * <p>
      * Based on the provided checkPriority and priority parameters this aims to report the overlapping
      * index templates regardless of the priority (ie. checkPriority == false) or otherwise overlapping
      * templates with the same priority as the given priority parameter (this is useful when trying to
diff --git a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodes.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodes.java
index 675d4e09e3be0..561f1b727efe0 100644
--- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodes.java
+++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodes.java
@@ -356,7 +356,7 @@ public DiscoveryNode findByAddress(TransportAddress address) {
     /**
      * Returns the version of the node with the oldest version in the cluster that is not a client node
-     *
+     * <p>
      * If there are no non-client nodes, Version.CURRENT will be returned.
      *
      * @return the oldest version in the cluster
@@ -367,7 +367,7 @@ public Version getSmallestNonClientNodeVersion() {
     /**
      * Returns the version of the node with the youngest version in the cluster that is not a client node.
-     *
+     * <p>
      * If there are no non-client nodes, Version.CURRENT will be returned.
      *
      * @return the youngest version in the cluster
@@ -417,16 +417,16 @@ public DiscoveryNode resolveNode(String node) {
     /**
      * Resolves a set of nodes according to the given sequence of node specifications. Implements the logic in various APIs that allow the
      * user to run the action on a subset of the nodes in the cluster. See [Node specification] in the reference manual for full details.
-     *
+     * <p>
      * Works by tracking the current set of nodes and applying each node specification in sequence. The set starts out empty and each node
      * specification may either add or remove nodes. For instance:
-     *
+     * <p>
      * - _local, _cluster_manager (_master) and _all respectively add to the subset the local node, the currently-elected cluster_manager, and all the nodes
      * - node IDs, names, hostnames and IP addresses all add to the subset any nodes which match
      * - a wildcard-based pattern of the form "attr*:value*" adds to the subset all nodes with a matching attribute with a matching value
      * - role:true adds to the subset all nodes with a matching role
      * - role:false removes from the subset all nodes with a matching role.
-     *
+     * <p>
      * An empty sequence of node specifications returns all nodes, since the corresponding actions run on all nodes by default.
      */
    public String[] resolveNodes(String... nodes) {
@@ -813,7 +813,7 @@ public Builder localNodeId(String localNodeId) {
         * Checks that a node can be safely added to this node collection.
         *
         * @return null if all is OK or an error message explaining why a node can not be added.
-        *
+        * <p>
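[Illustrative aside, not part of this patch: a small sketch of the resolveNodes sequencing described above, assuming a ClusterState is at hand.]

    import org.opensearch.cluster.ClusterState;

    public final class ResolveNodesExample {
        // Sketch: start from all data nodes, then remove ingest-role nodes;
        // an empty argument list would have returned every node instead.
        public static String[] dataButNotIngest(ClusterState state) {
            return state.nodes().resolveNodes("data:true", "ingest:false");
        }
    }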
         * Note: if this method returns a non-null value, calling {@link #add(DiscoveryNode)} will fail with an
         * exception
diff --git a/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java b/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java
index 9cc09c6e4c31c..2dd57431d0375 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java
@@ -463,7 +463,7 @@ private static Map rankNodes(
      * OpenSearch, however, we do not have that sort of broadcast-to-all behavior. In order to prevent a node that gets a high score and
      * then never gets any more requests, we must ensure it eventually returns to a more normal score and can be a candidate for serving
      * requests.
-     *
+     * <p>
      * This adjustment takes the "winning" node's statistics and adds the average of those statistics with each non-winning node. Let's say
      * the winning node had a queue size of 10 and a non-winning node had a queue of 18. The average queue size is (10 + 18) / 2 = 14 so the
      * non-winning node will have statistics added for a queue size of 14. This is repeated for the response time and service times as well.
diff --git a/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java b/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java
index 5cef46689ffc7..b01e074ce40c2 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java
@@ -49,7 +49,7 @@
 /**
  * Represents the recovery source of a shard. Available recovery types are:
- *
+ * <p>
  * - {@link EmptyStoreRecoverySource} recovery from an empty store
  * - {@link ExistingStoreRecoverySource} recovery from an existing store
  * - {@link PeerRecoverySource} recovery from a primary on another node
diff --git a/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java b/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java
index 4f7b935f15f93..5a4352653cc89 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java
@@ -70,7 +70,7 @@
 * {@link RoutingNodes} represents a copy of the routing information contained in the {@link ClusterState cluster state}.
 * It can be either initialized as mutable or immutable (see {@link #RoutingNodes(ClusterState, boolean)}), allowing
 * or disallowing changes to its elements.
- *
+ * <p>
 * The main methods used to update routing entries are:
 * <ul>
 * <li> {@link #initializeShard} initializes an unassigned shard.
@@ -369,7 +369,7 @@ public ShardRouting activePrimary(ShardId shardId) {
     /**
      * Returns one active replica shard for the given shard id or null if
      * no active replica is found.
-     *
+     * <p>
      * Since replicas could possibly be on nodes with an older version of OpenSearch than
      * the primary is, this will return replicas on the highest version of OpenSearch when document
      * replication is enabled.
@@ -395,7 +395,7 @@ public ShardRouting activeReplicaWithHighestVersion(ShardId shardId) {
     /**
      * Returns one active replica shard for the given shard id or null if
      * no active replica is found.
-     *
+     * <p>
      * Since replicas could possibly be on nodes with a higher version of OpenSearch than
      * the primary is, this will return replicas on the oldest version of OpenSearch when segment
      * replication is enabled to allow for replica to read segments from primary.
@@ -544,9 +544,9 @@ public Tuple<ShardRouting, ShardRouting> relocateShard(
     /**
      * Applies the relevant logic to start an initializing shard.
-     *
+     * <p>
      * Moves the initializing shard to started. If the shard is a relocation target, also removes the relocation source.
-     *
+     * <p>
      * If the started shard is a primary relocation target, this also reinitializes currently initializing replicas as their
      * recovery source changes
      *
@@ -605,9 +605,9 @@ public ShardRouting startShard(Logger logger, ShardRouting initializingShard, Ro
     /**
      * Applies the relevant logic to handle a cancelled or failed shard.
-     *
+     * <p>
      * Moves the shard to unassigned or completely removes the shard (if relocation target).
-     *
+     * <p>
      * - If shard is a primary, this also fails initializing replicas.
      * - If shard is an active primary, this also promotes an active replica to primary (if such a replica exists).
      * - If shard is a relocating primary, this also removes the primary relocation target shard.
diff --git a/server/src/main/java/org/opensearch/cluster/routing/ShardMovementStrategy.java b/server/src/main/java/org/opensearch/cluster/routing/ShardMovementStrategy.java
index cfdeed5c227b6..7f5109416494e 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/ShardMovementStrategy.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/ShardMovementStrategy.java
@@ -14,7 +14,7 @@
 /**
  * ShardMovementStrategy defines the order in which shard movement occurs.
- *
+ * <p>
  * ShardMovementStrategy values or rather their string representation to be used with
  * {@link BalancedShardsAllocator#SHARD_MOVEMENT_STRATEGY_SETTING} via cluster settings.
  *
diff --git a/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java b/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java
index de36547b10707..5e748df5eed2d 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java
@@ -153,7 +153,7 @@ public enum Reason {
     /**
      * Captures the status of an unsuccessful allocation attempt for the shard,
      * causing it to remain in the unassigned state.
-     *
+     * <p>
      * Note, ordering of the enum is important, make sure to add new values
      * at the end and handle version serialization properly.
      *
diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/AwarenessReplicaBalance.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/AwarenessReplicaBalance.java
index 19601483d5607..6fc0e535ef4dc 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/allocation/AwarenessReplicaBalance.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/AwarenessReplicaBalance.java
@@ -26,7 +26,7 @@
 * This {@link AwarenessReplicaBalance} gives total unique values of awareness attributes
 * It takes in effect only iff cluster.routing.allocation.awareness.attributes and
 * cluster.routing.allocation.awareness.force.zone.values both are specified.
- *
+ * <p>
 * This is used in enforcing total copy of shard is a maximum of unique values of awareness attributes
 * Helps in balancing shards across all awareness attributes and ensuring high availability of data.
 */
diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/ConstraintTypes.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/ConstraintTypes.java
index f209e993518c1..ae2d4a0926194 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/allocation/ConstraintTypes.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/ConstraintTypes.java
@@ -36,14 +36,14 @@ public class ConstraintTypes {
     /**
      * Constraint to control number of shards of an index allocated on a single
      * node.
-     *
+     * <p>
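[Illustrative aside, not part of this patch: a hedged sketch of the two awareness settings that must both be present for the AwarenessReplicaBalance behavior above to take effect; zone names are hypothetical.]

    import org.opensearch.common.settings.Settings;

    public final class AwarenessExample {
        // Sketch: three zones are declared, so the total copies of a shard
        // (primary + replicas) are balanced against three unique zone values.
        public static Settings zoneAwareness() {
            return Settings.builder()
                .put("cluster.routing.allocation.awareness.attributes", "zone")
                .put("cluster.routing.allocation.awareness.force.zone.values", "zone-a,zone-b,zone-c")
                .build();
        }
    }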
      * In current weight function implementation, when a node has significantly
      * fewer shards than other nodes (e.g. during single new node addition or node
      * replacement), its weight is much less than other nodes. All shard allocations
      * at this time tend to land on the new node with skewed weight. This breaks
      * index level balance in the cluster, by creating all shards of the same index
      * on one node, often resulting in a hotspot on that node.
-     *
+     * <p>
      * This constraint is breached when balancer attempts to allocate more than
      * average shards per index per node.
      */
diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/IndexMetadataUpdater.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/IndexMetadataUpdater.java
index ae8d92dae6811..7fc78b05880f3 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/allocation/IndexMetadataUpdater.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/IndexMetadataUpdater.java
@@ -59,9 +59,9 @@
 /**
 * Observer that tracks changes made to RoutingNodes in order to update the primary terms and in-sync allocation ids in
 * {@link IndexMetadata} once the allocation round has completed.
- *
+ * <p>
 * Primary terms are updated on primary initialization or when an active primary fails.
- *
+ * <p>
 * Allocation ids are added for shards that become active and removed for shards that stop being active.
 *
 * @opensearch.internal
diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java
index 90eff50fd9b5d..41ace0e7661fe 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java
@@ -336,7 +336,7 @@ public boolean getPreferPrimaryBalance() {
     * </li>
     * </ul>
     * <code>weight(node, index) = weight<sub>index</sub>(node, index) + weight<sub>node</sub>(node, index)</code>
-    *
+    * <p>
     * package-private for testing
     */
    static class WeightFunction {
diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java
index 3365b58d92a63..75448520a499c 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java
@@ -530,7 +530,7 @@ private void checkAndAddInEligibleTargetNode(RoutingNode targetNode) {
    /**
     * Move started shards that can not be allocated to a node anymore
-    *
+    * <p>
     * For each shard to be moved this function executes a move operation
     * to the minimal eligible node with respect to the
     * weight function. If a shard is moved the shard will be set to
diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/ShardsAllocator.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/ShardsAllocator.java
index 63d8c656f5049..29e9acca4e6c2 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/ShardsAllocator.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/ShardsAllocator.java
@@ -63,12 +63,12 @@ public interface ShardsAllocator {
     * Returns the decision for where a shard should reside in the cluster. If the shard is unassigned,
     * then the {@link AllocateUnassignedDecision} will be non-null. If the shard is not in the unassigned
     * state, then the {@link MoveDecision} will be non-null.
-    *
+    * <p>
     * This method is primarily used by the cluster allocation explain API to provide detailed explanations
     * for the allocation of a single shard. Implementations of the {@link #allocate(RoutingAllocation)} method
     * may use the results of this method implementation to decide on allocating shards in the routing table
     * to the cluster.
-    *
+    * <p>
     * If an implementation of this interface does not support explaining decisions for a single shard through
     * the cluster explain API, then this method should throw a {@code UnsupportedOperationException}.
     */
diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/command/AllocationCommand.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/command/AllocationCommand.java
index 7fffb0299af85..def0411853643 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/allocation/command/AllocationCommand.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/command/AllocationCommand.java
@@ -43,7 +43,7 @@
 /**
 * A command to move shards in some way.
- *
+ * <p>
 * Commands are registered in {@link NetworkModule}.
 *
 * @opensearch.internal
diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AllocationDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AllocationDecider.java
index 24c3fd7f34e4a..85f193c8c5580 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AllocationDecider.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AllocationDecider.java
@@ -109,7 +109,7 @@ public Decision canRebalance(RoutingAllocation allocation) {
     * Returns a {@link Decision} whether the given primary shard can be
     * forcibly allocated on the given node. This method should only be called
     * for unassigned primary shards where the node has a shard copy on disk.
-    *
+    * <p>
     * Note: all implementations that override this behavior should take into account
     * the results of {@link #canAllocate(ShardRouting, RoutingNode, RoutingAllocation)}
     * before making a decision on force allocation, because force allocation should only
diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDecider.java
index 1bd47f111591d..2c7df6b81e676 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDecider.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDecider.java
@@ -73,23 +73,23 @@
 /**
 * The {@link DiskThresholdDecider} checks that the node a shard is potentially
 * being allocated to has enough disk space.
- *
+ * <p>
 * It has three configurable settings, all of which can be changed dynamically:
- *
+ * <p>
 * cluster.routing.allocation.disk.watermark.low is the low disk
 * watermark. New shards will not be allocated to a node with usage higher than this,
 * although this watermark may be passed by allocating a shard. It defaults to
 * 0.85 (85.0%).
- *
+ * <p>
 * cluster.routing.allocation.disk.watermark.high is the high disk
 * watermark. If a node has usage higher than this, shards are not allowed to
 * remain on the node. In addition, if allocating a shard to a node causes the
 * node to pass this watermark, it will not be allowed. It defaults to
 * 0.90 (90.0%).
- *
+ * <p>
 * Both watermark settings are expressed in terms of used disk percentage, or
 * exact byte values for free space (like "500mb")
- *
+ * <p>
 * cluster.routing.allocation.disk.threshold_enabled is used to
 * enable or disable this decider. It defaults to true (enabled).
 *
@@ -119,7 +119,7 @@ public DiskThresholdDecider(Settings settings, ClusterSettings clusterSettings)
    /**
     * Returns the size of all shards that are currently being relocated to
     * the node, but may not be finished transferring yet.
-    *
+    * <p>
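[Illustrative aside, not part of this patch: the three DiskThresholdDecider settings above expressed as cluster settings; the values shown are the documented defaults, for illustration only.]

    import org.opensearch.common.settings.Settings;

    public final class DiskWatermarkExample {
        // Sketch: watermarks may be percentages of used disk or absolute
        // free-space values such as "500mb", per the javadoc above.
        public static Settings watermarks() {
            return Settings.builder()
                .put("cluster.routing.allocation.disk.threshold_enabled", true)
                .put("cluster.routing.allocation.disk.watermark.low", "85%")
                .put("cluster.routing.allocation.disk.watermark.high", "90%")
                .build();
        }
    }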
     * If subtractShardsMovingAway is true then the size of shards moving away is subtracted from the total size of all shards
     */
    public static long sizeOfRelocatingShards(
diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java
index 1680f2d8cad1d..c2eccdbc6ed26 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java
@@ -44,7 +44,7 @@
 /**
 * An allocation decider that prevents multiple instances of the same shard to
 * be allocated on the same {@code node}.
- *
+ * <p>
 * The {@link #CLUSTER_ROUTING_ALLOCATION_SAME_HOST_SETTING} setting allows to perform a check to prevent
 * allocation of multiple instances of the same shard on a single {@code host},
 * based on host name and host address. Defaults to `false`, meaning that no
diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java
index 3a9fdf0ea10cf..26a04de31ce39 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java
@@ -323,7 +323,7 @@ private Decision allocateShardCopies(
     * - the initializing shard routing if we want to assign the initializing shard to this node instead
     * - the started shard routing in case if we want to check if we can relocate to this node.
     * - the relocating shard routing if we want to relocate to this node now instead.
-    *
+    * <p>
     * This method returns the corresponding initializing shard that would be allocated to this node.
     */
    private ShardRouting initializingShard(ShardRouting shardRouting, String currentNodeId) {
diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java b/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java
index 007508162ba14..8e94e7cab23d3 100644
--- a/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java
+++ b/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java
@@ -294,7 +294,7 @@ public void addLocalNodeMasterListener(LocalNodeMasterListener listener) {
    /**
     * Adds a cluster state listener that is expected to be removed during a short period of time.
     * If provided, the listener will be notified once a specific time has elapsed.
-    *
+    * <p>
     * NOTE: the listener is not removed on timeout. This is the responsibility of the caller.
     */
    public void addTimeoutListener(@Nullable final TimeValue timeout, final TimeoutClusterStateListener listener) {
diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterManagerTaskThrottler.java b/server/src/main/java/org/opensearch/cluster/service/ClusterManagerTaskThrottler.java
index 8da6b1b941f83..afc4e36ec352e 100644
--- a/server/src/main/java/org/opensearch/cluster/service/ClusterManagerTaskThrottler.java
+++ b/server/src/main/java/org/opensearch/cluster/service/ClusterManagerTaskThrottler.java
@@ -29,7 +29,7 @@
 /**
 * This class does throttling on task submission to cluster manager node, it uses throttling key defined in various executors
 * as key for throttling. Throttling will be performed over task executor's class level, different task types have different executors class.
- *
+ * <p>
 * Set a specific setting to set the threshold for throttling of a particular task type.
 * e.g : Set "cluster_manager.throttling.thresholds.put_mapping" to set throttling limit of "put mapping" tasks,
 * Set it to default value(-1) to disable the throttling for this task type.
@@ -117,9 +117,9 @@ public static TimeValue getMaxDelayForRetry() {
     * * Register task to cluster service with task key,
     * * override getClusterManagerThrottlingKey method with above task key in task executor.
     * * Verify that throttled tasks would be retried from data nodes
-    *
+    * <p>
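[Illustrative aside, not part of this patch: a sketch of the throttling threshold setting quoted in the javadoc above; the exact key is defined by the registering executor, so treat the string as an assumption.]

    import org.opensearch.common.settings.Settings;

    public final class ThrottlingThresholdExample {
        // Sketch: allow at most 100 pending put-mapping tasks before the cluster
        // manager starts throttling them; -1 restores the default (disabled).
        public static Settings putMappingThreshold() {
            return Settings.builder()
                .put("cluster_manager.throttling.thresholds.put_mapping", 100)
                .build();
        }
    }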
     * Added retry mechanism in TransportClusterManagerNodeAction, so it would be retried for customer generated tasks.
-    *
+    * <p>
     * If tasks are not getting retried then we can register with false flag, so user won't be able to configure threshold limits for it.
     */
    protected ThrottlingKey registerClusterManagerTask(String taskKey, boolean throttlingEnabled) {
@@ -236,7 +236,7 @@ public void onBeginSubmit(List tasks) {
     * It may start throwing throttling exception to older nodes in cluster.
     * Older version nodes will not be equipped to handle the throttling exception and
     * this may result in unexpected behavior where internal tasks would start failing without any retries.
-    *
+    * <p>
                      * For every task submission request, it will validate that the node versions are greater than or equal to 2.5.0 and set the startThrottling flag. * Once the startThrottling flag is set, it will not perform the check for the next set of tasks. */ diff --git a/server/src/main/java/org/opensearch/common/Randomness.java b/server/src/main/java/org/opensearch/common/Randomness.java index 2c60e848b9db9..221bc95c41f31 100644 --- a/server/src/main/java/org/opensearch/common/Randomness.java +++ b/server/src/main/java/org/opensearch/common/Randomness.java @@ -127,7 +127,7 @@ public static Random get() { /** * Provides a secure source of randomness. - * + *

                      * This acts exactly like {@link #get()}, but returns a new {@link SecureRandom}. */ public static SecureRandom createSecure() { diff --git a/server/src/main/java/org/opensearch/common/blobstore/BlobContainer.java b/server/src/main/java/org/opensearch/common/blobstore/BlobContainer.java index 3cdb1ce30b68d..2e25a532b5abf 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/BlobContainer.java +++ b/server/src/main/java/org/opensearch/common/blobstore/BlobContainer.java @@ -93,10 +93,10 @@ public interface BlobContainer { /** * Provides a hint to clients for a suitable length to use with {@link BlobContainer#readBlob(String, long, long)}. - * + *

                      * Some blob containers have nontrivial costs attached to each readBlob call, so it is a good idea for consumers to speculatively * request more data than they need right now and to re-use this stream for future needs if possible. - * + *

                      * Also, some blob containers return streams that are expensive to close before the stream has been fully consumed, and the cost may * depend on the length of the data that was left unconsumed. For these containers it's best to bound the cost of a partial read by * bounding the length of the data requested. @@ -131,7 +131,7 @@ default long readBlobPreferredLength() { /** * Reads blob content from the input stream and writes it to the container in a new blob with the given name, * using an atomic write operation if the implementation supports it. - * + *
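A minimal sketch of the bounded partial read recommended above, using only the readBlob(String, long, long) and readBlobPreferredLength() methods shown in this hunk (the helper name and parameters are otherwise illustrative):

    import java.io.IOException;
    import java.io.InputStream;

    import org.opensearch.common.blobstore.BlobContainer;

    class BoundedReadSketch {
        /** Reads the next chunk starting at offset, bounded by the container's preferred-length hint. */
        static InputStream readChunk(BlobContainer container, String name, long offset, long remaining) throws IOException {
            long length = Math.min(remaining, container.readBlobPreferredLength());
            return container.readBlob(name, offset, length);
        }
    }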

                      * This method assumes the container does not already contain a blob of the same blobName. If a blob by the * same name already exists, the operation will fail and an {@link IOException} will be thrown. * diff --git a/server/src/main/java/org/opensearch/common/blobstore/fs/FsBlobContainer.java b/server/src/main/java/org/opensearch/common/blobstore/fs/FsBlobContainer.java index 394855671688a..b6644ffd16bab 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/fs/FsBlobContainer.java +++ b/server/src/main/java/org/opensearch/common/blobstore/fs/FsBlobContainer.java @@ -69,7 +69,7 @@ /** * A file system based implementation of {@link org.opensearch.common.blobstore.BlobContainer}. * All blobs in the container are stored on a file system, the location of which is specified by the {@link BlobPath}. - * + *

                      * Note that the methods in this implementation of {@link org.opensearch.common.blobstore.BlobContainer} may * additionally throw a {@link java.lang.SecurityException} if the configured {@link java.lang.SecurityManager} * does not permit read and/or write access to the underlying files. @@ -258,7 +258,7 @@ public static String tempBlobName(final String blobName) { /** * Returns true if the blob is a leftover temporary blob. - * + *

                      * Temporary blobs might be left behind after a failed atomic write operation. */ public static boolean isTempBlobName(final String blobName) { diff --git a/server/src/main/java/org/opensearch/common/breaker/ChildMemoryCircuitBreaker.java b/server/src/main/java/org/opensearch/common/breaker/ChildMemoryCircuitBreaker.java index de4e6ad433c55..c9b498c3ec6fa 100644 --- a/server/src/main/java/org/opensearch/common/breaker/ChildMemoryCircuitBreaker.java +++ b/server/src/main/java/org/opensearch/common/breaker/ChildMemoryCircuitBreaker.java @@ -204,7 +204,7 @@ private long limit(long bytes, String label, double overheadConstant, long memor /** * Add an exact number of bytes, not checking for tripping the * circuit breaker. This bypasses the overheadConstant multiplication. - * + *
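For example, the two helpers shown in this hunk pair up as follows (a sketch; the blob name is illustrative and the temporary-name format is an implementation detail):

    import org.opensearch.common.blobstore.fs.FsBlobContainer;

    class TempBlobSketch {
        public static void main(String[] args) {
            String temp = FsBlobContainer.tempBlobName("segments_5"); // blob name is illustrative
            // A name produced by tempBlobName is recognized as a leftover temporary blob.
            System.out.println(FsBlobContainer.isTempBlobName(temp)); // true
        }
    }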

                      * Also does not check with the parent breaker to see if the parent limit * has been exceeded. * diff --git a/server/src/main/java/org/opensearch/common/cache/Cache.java b/server/src/main/java/org/opensearch/common/cache/Cache.java index 0ebef1556424b..0b2b608b55df0 100644 --- a/server/src/main/java/org/opensearch/common/cache/Cache.java +++ b/server/src/main/java/org/opensearch/common/cache/Cache.java @@ -403,7 +403,7 @@ private V get(K key, long now, Consumer> onExpiration) { * If the specified key is not already associated with a value (or is mapped to null), attempts to compute its * value using the given mapping function and enters it into this map unless null. The load method for a given key * will be invoked at most once. - * + *

                      * Use of different {@link CacheLoader} implementations on the same key concurrently may result in only the first * loader function being called; the second will receive the result provided by the first, including any exceptions * thrown during the execution of the first. diff --git a/server/src/main/java/org/opensearch/common/collect/CopyOnWriteHashMap.java b/server/src/main/java/org/opensearch/common/collect/CopyOnWriteHashMap.java index 5ce77cdc75fe5..de4304f0e1fba 100644 --- a/server/src/main/java/org/opensearch/common/collect/CopyOnWriteHashMap.java +++ b/server/src/main/java/org/opensearch/common/collect/CopyOnWriteHashMap.java @@ -49,15 +49,15 @@ /** * An immutable map whose writes result in a new copy of the map to be created. - * + *
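A sketch of the at-most-once loading contract Cache#computeIfAbsent describes above; the CacheBuilder call is an assumption about the surrounding API rather than something this hunk shows:

    import java.util.concurrent.ExecutionException;

    import org.opensearch.common.cache.Cache;
    import org.opensearch.common.cache.CacheBuilder;

    class CacheLoadSketch {
        public static void main(String[] args) throws ExecutionException {
            Cache<String, String> cache = CacheBuilder.<String, String>builder().build();
            // The loader for a given key runs at most once; concurrent callers with
            // different loaders all receive the first loader's result (or exception).
            String value = cache.computeIfAbsent("key", k -> "loaded:" + k);
            System.out.println(value); // loaded:key
        }
    }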

                      * This is essentially a hash array mapped trie: inner nodes use a bitmap in * order to map hashes to slots by counting ones. In case of a collision (two * values having the same 32-bit hash), a leaf node is created which stores * and searches for values sequentially. - * + *

                      * Reads and writes both perform in logarithmic time. Null keys and values are * not supported. - * + *

                      * This structure might need to perform several object creations per write so * it is better suited for work-loads that are not too write-intensive. * @@ -250,7 +250,7 @@ public static T[] insertElement(final T[] array, final T element, final int * and use a bitmap in order to associate hashes to them. For example, if * an inner node contains 5 values, then 5 bits will be set in the bitmap * and the ordinal of the bit set in this bit map will be the slot number. - * + *

                      * As a consequence, the number of slots in an inner node is equal to the * number of one bits in the bitmap. * diff --git a/server/src/main/java/org/opensearch/common/geo/GeoShapeType.java b/server/src/main/java/org/opensearch/common/geo/GeoShapeType.java index 0334d367ffdbc..1622457ba27cc 100644 --- a/server/src/main/java/org/opensearch/common/geo/GeoShapeType.java +++ b/server/src/main/java/org/opensearch/common/geo/GeoShapeType.java @@ -221,11 +221,11 @@ void validateLinearRing(CoordinateNode coordinates, boolean coerce) { @Override CoordinateNode validate(CoordinateNode coordinates, boolean coerce) { - /** - * Per GeoJSON spec (http://geojson.org/geojson-spec.html#linestring) - * A LinearRing is closed LineString with 4 or more positions. The first and last positions - * are equivalent (they represent equivalent points). Though a LinearRing is not explicitly - * represented as a GeoJSON geometry type, it is referred to in the Polygon geometry type definition. + /* + Per GeoJSON spec (http://geojson.org/geojson-spec.html#linestring) + A LinearRing is closed LineString with 4 or more positions. The first and last positions + are equivalent (they represent equivalent points). Though a LinearRing is not explicitly + represented as a GeoJSON geometry type, it is referred to in the Polygon geometry type definition. */ if (coordinates.children == null || coordinates.children.isEmpty()) { throw new OpenSearchParseException( diff --git a/server/src/main/java/org/opensearch/common/geo/GeoUtils.java b/server/src/main/java/org/opensearch/common/geo/GeoUtils.java index 393c238cb3b2f..8c566c4191e4f 100644 --- a/server/src/main/java/org/opensearch/common/geo/GeoUtils.java +++ b/server/src/main/java/org/opensearch/common/geo/GeoUtils.java @@ -665,7 +665,7 @@ public static GeoPoint parseFromString(String val) { /** * Parse a precision that can be expressed as an integer or a distance measure like "1km", "10m". - * + *
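The bitmap-to-slot mapping CopyOnWriteHashMap's javadoc describes above can be written out directly; a standalone pure-JDK sketch (not code from this patch):

    class HamtSlotSketch {
        /** Returns the slot for a 5-bit hash fragment, or -1 if the fragment's bit is not set. */
        static int slot(int bitmap, int hashFragment) {
            int bit = 1 << hashFragment;                  // candidate position, 0..31
            if ((bitmap & bit) == 0) {
                return -1;                                // no entry for this fragment
            }
            return Integer.bitCount(bitmap & (bit - 1));  // number of one bits below the position
        }

        public static void main(String[] args) {
            int bitmap = 0b10110;                         // three bits set => three slots in the node
            System.out.println(slot(bitmap, 4));          // 2: two one bits below position 4
        }
    }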

                      * The precision is expressed as a number between 1 and 12 and indicates the length of the geohash * used to represent geo points. * @@ -696,7 +696,7 @@ public static int parsePrecision(XContentParser parser) throws IOException, Open /** * Checks that the precision is within the range supported by opensearch - between 1 and 12 - * + *

                      * Returns the precision value if it is in the range and throws an IllegalArgumentException if it * is outside the range. */ diff --git a/server/src/main/java/org/opensearch/common/geo/GeometryFormat.java b/server/src/main/java/org/opensearch/common/geo/GeometryFormat.java index 56146fc8197be..93c7f4b93679a 100644 --- a/server/src/main/java/org/opensearch/common/geo/GeometryFormat.java +++ b/server/src/main/java/org/opensearch/common/geo/GeometryFormat.java @@ -63,7 +63,7 @@ public interface GeometryFormat { /** * Serializes the geometry into a standard Java object. - * + *

                      * For example, the GeoJson format returns the geometry as a map, while WKT returns a string. */ Object toXContentAsObject(ParsedFormat geometry); diff --git a/server/src/main/java/org/opensearch/common/geo/builders/PolygonBuilder.java b/server/src/main/java/org/opensearch/common/geo/builders/PolygonBuilder.java index b436787220eb0..9e118ab2de3a5 100644 --- a/server/src/main/java/org/opensearch/common/geo/builders/PolygonBuilder.java +++ b/server/src/main/java/org/opensearch/common/geo/builders/PolygonBuilder.java @@ -177,11 +177,11 @@ public PolygonBuilder close() { } private static void validateLinearRing(LineStringBuilder lineString) { - /** - * Per GeoJSON spec (http://geojson.org/geojson-spec.html#linestring) - * A LinearRing is closed LineString with 4 or more positions. The first and last positions - * are equivalent (they represent equivalent points). Though a LinearRing is not explicitly - * represented as a GeoJSON geometry type, it is referred to in the Polygon geometry type definition. + /* + Per GeoJSON spec (http://geojson.org/geojson-spec.html#linestring) + A LinearRing is closed LineString with 4 or more positions. The first and last positions + are equivalent (they represent equivalent points). Though a LinearRing is not explicitly + represented as a GeoJSON geometry type, it is referred to in the Polygon geometry type definition. */ List points = lineString.coordinates; if (points.size() < 4) { diff --git a/server/src/main/java/org/opensearch/common/geo/parsers/GeoJsonParser.java b/server/src/main/java/org/opensearch/common/geo/parsers/GeoJsonParser.java index 37d42ce600b6d..8d473ae6721d2 100644 --- a/server/src/main/java/org/opensearch/common/geo/parsers/GeoJsonParser.java +++ b/server/src/main/java/org/opensearch/common/geo/parsers/GeoJsonParser.java @@ -52,7 +52,7 @@ /** * Parses shape geometry represented in geojson - * + *

                      * complies with geojson specification: https://tools.ietf.org/html/rfc7946 * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/common/geo/parsers/GeoWKTParser.java b/server/src/main/java/org/opensearch/common/geo/parsers/GeoWKTParser.java index d99d1daf46a2a..b199da0f3691a 100644 --- a/server/src/main/java/org/opensearch/common/geo/parsers/GeoWKTParser.java +++ b/server/src/main/java/org/opensearch/common/geo/parsers/GeoWKTParser.java @@ -58,7 +58,7 @@ /** * Parses shape geometry represented in WKT format - * + *

                      * complies with OGC® document: 12-063r5 and ISO/IEC 13249-3:2016 standard * located at http://docs.opengeospatial.org/is/12-063r5/12-063r5.html * diff --git a/server/src/main/java/org/opensearch/common/hash/MurmurHash3.java b/server/src/main/java/org/opensearch/common/hash/MurmurHash3.java index 8ba0bd7ee1be4..e481ffd460798 100644 --- a/server/src/main/java/org/opensearch/common/hash/MurmurHash3.java +++ b/server/src/main/java/org/opensearch/common/hash/MurmurHash3.java @@ -93,7 +93,7 @@ protected static long fmix(long k) { /** * Compute the hash of the MurmurHash3_x64_128 hashing function. - * + *

                      * Note, this hashing function might be used to persist hashes, so if the way hashes are computed * changes for some reason, it needs to be addressed (like in BloomFilter and MurmurHashField). */ diff --git a/server/src/main/java/org/opensearch/common/inject/Initializer.java b/server/src/main/java/org/opensearch/common/inject/Initializer.java index e806eba6df707..b88b01c03c018 100644 --- a/server/src/main/java/org/opensearch/common/inject/Initializer.java +++ b/server/src/main/java/org/opensearch/common/inject/Initializer.java @@ -68,9 +68,8 @@ class Initializer { /** * Registers an instance for member injection when that step is performed. * - * @param instance an instance that optionally has members to be injected (each annotated with - * @param source the source location that this injection was requested - * @Inject). + * @param instance an instance that optionally has members to be injected (each annotated with {@code @Inject}). + * @param source the source location that this injection was requested */ public Initializable requestInjection(InjectorImpl injector, T instance, Object source, Set injectionPoints) { Objects.requireNonNull(source); diff --git a/server/src/main/java/org/opensearch/common/io/stream/Streamables.java b/server/src/main/java/org/opensearch/common/io/stream/Streamables.java index fd9ed6e5b18fe..f1e5f5f22d527 100644 --- a/server/src/main/java/org/opensearch/common/io/stream/Streamables.java +++ b/server/src/main/java/org/opensearch/common/io/stream/Streamables.java @@ -40,7 +40,7 @@ public static void registerStreamables() { * Registers writers by class type */ private static void registerWriters() { - /** {@link GeoPoint} */ + /* {@link GeoPoint} */ WriteableRegistry.registerWriter(GeoPoint.class, (o, v) -> { o.writeByte((byte) 22); ((GeoPoint) v).writeTo(o); @@ -53,7 +53,7 @@ private static void registerWriters() { * NOTE: see {@code StreamOutput#WRITERS} for all registered ordinals */ private static void registerReaders() { - /** {@link GeoPoint} */ + /* {@link GeoPoint} */ WriteableRegistry.registerReader(Byte.valueOf((byte) 22), GeoPoint::new); } } diff --git a/server/src/main/java/org/opensearch/common/joda/JodaDateMathParser.java b/server/src/main/java/org/opensearch/common/joda/JodaDateMathParser.java index 0de6dec1c25bd..ae38e9a6a8073 100644 --- a/server/src/main/java/org/opensearch/common/joda/JodaDateMathParser.java +++ b/server/src/main/java/org/opensearch/common/joda/JodaDateMathParser.java @@ -46,7 +46,7 @@ /** * A parser for date/time formatted text with optional date math. - * + *

                      * The format of the datetime is configurable, and unix timestamps can also be used. Datemath * is appended to a datetime with the following syntax: * ||[+-/](\d+)?[yMwdhHms]. diff --git a/server/src/main/java/org/opensearch/common/logging/JsonThrowablePatternConverter.java b/server/src/main/java/org/opensearch/common/logging/JsonThrowablePatternConverter.java index e259d5d9e3e33..ed324e4e62d8f 100644 --- a/server/src/main/java/org/opensearch/common/logging/JsonThrowablePatternConverter.java +++ b/server/src/main/java/org/opensearch/common/logging/JsonThrowablePatternConverter.java @@ -47,7 +47,7 @@ * Outputs the Throwable portion of the LoggingEvent as a Json formatted field with array * "exception": [ "stacktrace", "lines", "as", "array", "elements" ] - * + *

                      * Reusing @link org.apache.logging.log4j.core.pattern.ExtendedThrowablePatternConverter which already converts a Throwable from * LoggingEvent into a multiline string * diff --git a/server/src/main/java/org/opensearch/common/lucene/ShardCoreKeyMap.java b/server/src/main/java/org/opensearch/common/lucene/ShardCoreKeyMap.java index 0ffd633e5a967..17b75ab22f3ed 100644 --- a/server/src/main/java/org/opensearch/common/lucene/ShardCoreKeyMap.java +++ b/server/src/main/java/org/opensearch/common/lucene/ShardCoreKeyMap.java @@ -55,7 +55,7 @@ * mappings as segments that were not known before are added and prevents the * structure from growing indefinitely by registering close listeners on these * segments so that at any time it only tracks live segments. - * + *

                      * NOTE: This is heavy. Avoid using this class unless absolutely required. * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/common/rounding/Rounding.java b/server/src/main/java/org/opensearch/common/rounding/Rounding.java index 857031bc783f4..41e808b64f7d9 100644 --- a/server/src/main/java/org/opensearch/common/rounding/Rounding.java +++ b/server/src/main/java/org/opensearch/common/rounding/Rounding.java @@ -45,7 +45,7 @@ /** * A strategy for rounding long values. - * + *

                      * Use the java based Rounding class where applicable * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/common/settings/AbstractScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/AbstractScopedSettings.java index 8b7a2a82e5cb1..117ed66fcb451 100644 --- a/server/src/main/java/org/opensearch/common/settings/AbstractScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/AbstractScopedSettings.java @@ -254,7 +254,7 @@ public synchronized void addSettingsUpdateConsumer(Setting setting, Consu /** * Adds a settings consumer that is only executed if any setting in the supplied list of settings is changed. In that case all the * settings specified in the argument are returned. - * + *

                      * Also automatically adds empty consumers for all settings in order to activate logging */ public synchronized void addSettingsUpdateConsumer(Consumer consumer, List> settings) { @@ -265,7 +265,7 @@ public synchronized void addSettingsUpdateConsumer(Consumer consumer, * Adds a settings consumer that is only executed if any setting in the supplied list of settings is changed. In that case all the * settings specified in the argument are returned. The validator is run across all specified settings before the settings are * applied. - * + *

                      * Also automatically adds empty consumers for all settings in order to activate logging */ public synchronized void addSettingsUpdateConsumer( diff --git a/server/src/main/java/org/opensearch/common/settings/KeyStoreWrapper.java b/server/src/main/java/org/opensearch/common/settings/KeyStoreWrapper.java index f25dd872fc703..1ad3b7ab8875a 100644 --- a/server/src/main/java/org/opensearch/common/settings/KeyStoreWrapper.java +++ b/server/src/main/java/org/opensearch/common/settings/KeyStoreWrapper.java @@ -88,7 +88,7 @@ /** * A disk based container for sensitive settings in OpenSearch. - * + *

                      * Loading a keystore has 2 phases. First, call {@link #load(Path)}. Then call * {@link #decrypt(char[])} with the keystore password, or an empty char array if * {@link #hasPassword()} is {@code false}. Loading and decrypting should happen @@ -147,7 +147,7 @@ private static class Entry { /** * The number of bits for the cipher key. - * + *
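In caller code the two KeyStoreWrapper phases read as follows; a minimal sketch built only from the methods named above (the config path and password handling are illustrative):

    import java.nio.file.Path;
    import java.nio.file.Paths;

    import org.opensearch.common.settings.KeyStoreWrapper;

    class KeystoreLoadSketch {
        public static void main(String[] args) throws Exception {
            Path configDir = Paths.get("config");                        // illustrative directory
            KeyStoreWrapper keystore = KeyStoreWrapper.load(configDir);  // phase 1: read from disk
            if (keystore != null) {                                      // null means no keystore exists
                char[] password = keystore.hasPassword() ? "secret".toCharArray() : new char[0];
                keystore.decrypt(password);                              // phase 2: may only be called once
            }
        }
    }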

                      * Note: The Oracle JDK 8 ships with a limited JCE policy that restricts key length for AES to 128 bits. * This can be increased to 256 bits once Java 9 is the minimum Java version. * See http://www.oracle.com/technetwork/java/javase/terms/readme/jdk9-readme-3852447.html#jce @@ -234,7 +234,7 @@ public static KeyStoreWrapper load(Path configDir) throws IOException { /** * Loads information about the OpenSearch keystore from the provided config directory. - * + *

                      * {@link #decrypt(char[])} must be called before reading or writing any entries. * Returns {@code null} if no keystore exists. */ @@ -358,7 +358,7 @@ private Cipher createCipher(int opmode, char[] password, byte[] salt, byte[] iv) /** * Decrypts the underlying keystore data. - * + *

                      * This may only be called once. */ public void decrypt(char[] password) throws GeneralSecurityException, IOException { diff --git a/server/src/main/java/org/opensearch/common/settings/SecureSetting.java b/server/src/main/java/org/opensearch/common/settings/SecureSetting.java index f2ccc01a4c7e6..1855270b016b3 100644 --- a/server/src/main/java/org/opensearch/common/settings/SecureSetting.java +++ b/server/src/main/java/org/opensearch/common/settings/SecureSetting.java @@ -45,7 +45,7 @@ /** * A secure setting. - * + *

                      * This class allows access to settings from the OpenSearch keystore. * * @opensearch.internal @@ -152,7 +152,7 @@ public void diff(Settings.Builder builder, Settings source, Settings defaultSett /** * A setting which contains a sensitive string. - * + *

                      * This may be any sensitive string, e.g. a username, a password, an auth token, etc. */ public static Setting secureString(String name, Setting fallback, Property... properties) { @@ -179,7 +179,7 @@ public static Setting insecureString(String name, String secureNam /** * A setting which contains a file. Reading the setting opens an input stream to the file. - * + *

                      * This may be any sensitive file, e.g. a set of credentials normally in plaintext. */ public static Setting secureFile(String name, Setting fallback, Property... properties) { diff --git a/server/src/main/java/org/opensearch/common/settings/Settings.java b/server/src/main/java/org/opensearch/common/settings/Settings.java index d202feb0786bf..0557884f0f8ad 100644 --- a/server/src/main/java/org/opensearch/common/settings/Settings.java +++ b/server/src/main/java/org/opensearch/common/settings/Settings.java @@ -1222,7 +1222,7 @@ public boolean shouldRemoveMissingPlaceholder(String placeholderName) { /** * Checks that all settings(except archived settings and wildcards) in the builder start with the specified prefix. - * + *

                      * If a setting doesn't start with the prefix, the builder appends the prefix to such setting. */ public Builder normalizePrefix(String prefix) { diff --git a/server/src/main/java/org/opensearch/common/time/DateFormatters.java b/server/src/main/java/org/opensearch/common/time/DateFormatters.java index 6c8b9282d8a77..e74ab687b903b 100644 --- a/server/src/main/java/org/opensearch/common/time/DateFormatters.java +++ b/server/src/main/java/org/opensearch/common/time/DateFormatters.java @@ -2172,10 +2172,10 @@ static DateFormatter forPattern(String input) { * or Instant.from(accessor). This results in a huge performance penalty and should be prevented * This method prevents exceptions by querying the accessor for certain capabilities * and then acts on it accordingly - * + *

                      * This action assumes that we can reliably fall back to some defaults if not all parts of a * zoned date time are set - * + *

                      * - If a zoned date time is passed, it is returned * - If no timezone is found, ZoneOffset.UTC is used * - If we find a time and a date, converting to a ZonedDateTime is straightforward, diff --git a/server/src/main/java/org/opensearch/common/time/DateMathParser.java b/server/src/main/java/org/opensearch/common/time/DateMathParser.java index f6573eaa90286..7088d6cb7a498 100644 --- a/server/src/main/java/org/opensearch/common/time/DateMathParser.java +++ b/server/src/main/java/org/opensearch/common/time/DateMathParser.java @@ -64,12 +64,12 @@ default Instant parse(String text, LongSupplier now, boolean roundUpProperty, Da /** * Parse text that potentially contains date math into milliseconds since the epoch - * + *

                      * Examples are - * + *

                      * 2014-11-18||-2y subtracts two years from the input date * now/m rounds the current time to minute granularity - * + *

                      * Supported rounding units are * y year * M month diff --git a/server/src/main/java/org/opensearch/common/time/DateUtils.java b/server/src/main/java/org/opensearch/common/time/DateUtils.java index 021b8a3be8b23..7ab395a1117e7 100644 --- a/server/src/main/java/org/opensearch/common/time/DateUtils.java +++ b/server/src/main/java/org/opensearch/common/time/DateUtils.java @@ -342,7 +342,7 @@ public static long toMilliSeconds(long nanoSecondsSinceEpoch) { /** * Rounds the given utc milliseconds since the epoch down to the next unit millis - * + *
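Tying the date-math syntax above together, a sketch that follows the partial parse(...) signature visible in the DateMathParser hunk (the formatter pattern and zone are assumptions):

    import java.time.Instant;
    import java.time.ZoneOffset;

    import org.opensearch.common.time.DateFormatter;
    import org.opensearch.common.time.DateMathParser;

    class DateMathSketch {
        public static void main(String[] args) {
            DateMathParser parser = DateFormatter.forPattern("strict_date_optional_time").toDateMathParser();
            // "2014-11-18||-2y" anchors at the given date, then subtracts two years.
            Instant resolved = parser.parse("2014-11-18||-2y", System::currentTimeMillis, false, ZoneOffset.UTC);
            System.out.println(resolved); // 2012-11-18T00:00:00Z
        }
    }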

                      * Note: This does not check for correctness of the result, as this only works with units smaller than or equal to a day * In order to ensure the performance of this method, there are no guards or checks in it * diff --git a/server/src/main/java/org/opensearch/common/time/DateUtilsRounding.java b/server/src/main/java/org/opensearch/common/time/DateUtilsRounding.java index f3459a5857b9e..7fc39e063efb5 100644 --- a/server/src/main/java/org/opensearch/common/time/DateUtilsRounding.java +++ b/server/src/main/java/org/opensearch/common/time/DateUtilsRounding.java @@ -32,12 +32,12 @@ * This class has been copied from different locations within the joda time package, as * these methods are fast when used for rounding, as they do not require conversion to java * time objects - * + *

                      * This code has been copied from jodatime 2.10.1 * The source can be found at https://github.com/JodaOrg/joda-time/tree/v2.10.1 - * + *

                      * The following methods have been copied (along with required helper variables) - * + *

                      * - org.joda.time.chrono.GregorianChronology.calculateFirstDayOfYearMillis(int year) * - org.joda.time.chrono.BasicChronology.getYear(int year) * - org.joda.time.chrono.BasicGJChronology.getMonthOfYear(long utcMillis, int year) diff --git a/server/src/main/java/org/opensearch/common/time/JavaDateFormatter.java b/server/src/main/java/org/opensearch/common/time/JavaDateFormatter.java index 14191357fb8aa..f711b14aeb928 100644 --- a/server/src/main/java/org/opensearch/common/time/JavaDateFormatter.java +++ b/server/src/main/java/org/opensearch/common/time/JavaDateFormatter.java @@ -275,7 +275,7 @@ public TemporalAccessor parse(String input) { * it will continue iterating if the previous parser failed. The pattern must fully match, meaning whole input was used. * This also means that this method depends on DateTimeFormatter.ClassicFormat.parseObject * which does not throw exceptions when parsing failed. - * + *

                      * The approach with collection of parsers was taken because java-time requires ordering on optional (composite) * patterns. Joda does not suffer from this. * https://bugs.openjdk.java.net/browse/JDK-8188771 diff --git a/server/src/main/java/org/opensearch/common/time/JavaDateMathParser.java b/server/src/main/java/org/opensearch/common/time/JavaDateMathParser.java index 0536324b6516b..340901e7ac8e2 100644 --- a/server/src/main/java/org/opensearch/common/time/JavaDateMathParser.java +++ b/server/src/main/java/org/opensearch/common/time/JavaDateMathParser.java @@ -51,7 +51,7 @@ /** * A parser for date/time formatted text with optional date math. - * + *

                      * The format of the datetime is configurable, and unix timestamps can also be used. Datemath * is appended to a datetime with the following syntax: * ||[+-/](\d+)?[yMwdhHms]. diff --git a/server/src/main/java/org/opensearch/common/util/BinarySearcher.java b/server/src/main/java/org/opensearch/common/util/BinarySearcher.java index ca63c170c0ccd..e4315f8699206 100644 --- a/server/src/main/java/org/opensearch/common/util/BinarySearcher.java +++ b/server/src/main/java/org/opensearch/common/util/BinarySearcher.java @@ -34,14 +34,14 @@ /** * Performs binary search on an arbitrary data structure. - * + *

                      * To do a search, create a subclass and implement custom {@link #compare(int)} and {@link #distance(int)} methods. - * + *

                      * {@link BinarySearcher} knows nothing about the value being searched for or the underlying data structure. * These things should be determined by the subclass in its overridden methods. - * + *

                      * Refer to {@link BigArrays.DoubleBinarySearcher} for an example. - * + *

                      * NOTE: this class is not thread safe * * @opensearch.internal @@ -74,7 +74,7 @@ private int getClosestIndex(int index1, int index2) { /** * Uses a binary search to determine the index of the element within the index range {from, ... , to} that is * closest to the search value. - * + *

                      * Unlike most binary search implementations, the value being searched for is not an argument to the search method. * Rather, this value should be stored by the subclass along with the underlying array. * diff --git a/server/src/main/java/org/opensearch/common/util/CancellableThreads.java b/server/src/main/java/org/opensearch/common/util/CancellableThreads.java index 8bc3ca3affb12..67dd4b848f4c0 100644 --- a/server/src/main/java/org/opensearch/common/util/CancellableThreads.java +++ b/server/src/main/java/org/opensearch/common/util/CancellableThreads.java @@ -45,7 +45,7 @@ * A utility class for multi threaded operation that needs to be cancellable via interrupts. Every cancellable operation should be * executed via {@link #execute(Interruptible)}, which will capture the executing thread and make sure it is interrupted in the case * of cancellation. - * + *
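The shape this produces in a subclass; a standalone lower-bound sketch of the pattern (pure JDK, not the class from this patch, which additionally uses distance(int) to pick the closest index):

    class StoredTargetSearcherSketch {
        abstract static class Searcher {
            abstract int compare(int index); // sign of data[index] compared to the stored target

            /** Lower-bound binary search over [from, to]; the target is a field, not a parameter. */
            int search(int from, int to) {
                while (from < to) {
                    int mid = (from + to) >>> 1;
                    if (compare(mid) < 0) {
                        from = mid + 1;
                    } else {
                        to = mid;
                    }
                }
                return from;
            }
        }

        public static void main(String[] args) {
            double[] data = { 1.0, 3.0, 7.0, 9.0 };
            double target = 6.5; // stored alongside the array, per the javadoc above
            Searcher searcher = new Searcher() {
                int compare(int index) {
                    return Double.compare(data[index], target);
                }
            };
            System.out.println(searcher.search(0, data.length - 1)); // 2: first index with data[i] >= 6.5
        }
    }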

                      * Cancellation policy: This class does not support external interruption via Thread#interrupt(). Always use #cancel() instead. * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/common/util/CuckooFilter.java b/server/src/main/java/org/opensearch/common/util/CuckooFilter.java index 0c792b37ccfa9..28b55f70855d6 100644 --- a/server/src/main/java/org/opensearch/common/util/CuckooFilter.java +++ b/server/src/main/java/org/opensearch/common/util/CuckooFilter.java @@ -46,33 +46,33 @@ /** * An approximate set membership datastructure - * + *

                      * CuckooFilters are similar to Bloom Filters in usage; values are inserted, and the Cuckoo * can be asked if it has seen a particular value before. Because the structure is approximate, * it can return false positives (says it has seen an item when it has not). False negatives * are not possible though; if the structure says it _has not_ seen an item, that can be * trusted. - * + *

                      * The filter can "saturate" at which point the map has hit it's configured load factor (or near enough * that a large number of evictions are not able to find a free slot) and will refuse to accept * any new insertions. - * + *

                      * NOTE: this version does not support deletions, and as such does not save duplicate * fingerprints (e.g. when inserting, if the fingerprint is already present in the * candidate buckets, it is not inserted). By not saving duplicates, the CuckooFilter * loses the ability to delete values. But by not allowing deletions, we can save space * (no need to waste slots on duplicate fingerprints), and we do not need to worry * about inserts "overflowing" a bucket because the same item has been added repeatedly - * + *

                      * NOTE: this CuckooFilter exposes a number of Expert APIs which assume the caller has * intimate knowledge about how the algorithm works. It is recommended to use * {@link SetBackedScalingCuckooFilter} instead. - * + *

                      * Based on the paper: - * + *

                      * Fan, Bin, et al. "Cuckoo filter: Practically better than bloom." * Proceedings of the 10th ACM International on Conference on emerging Networking Experiments and Technologies. ACM, 2014. - * + *

                      * https://www.cs.cmu.edu/~dga/papers/cuckoo-conext2014.pdf * * @opensearch.internal @@ -200,7 +200,7 @@ public int getCount() { /** * Returns the number of buckets that has been chosen based * on the initial configuration - * + *

                      * Expert-level API */ int getNumBuckets() { @@ -209,7 +209,7 @@ int getNumBuckets() { /** * Returns the number of bits used per entry - * + *

                      * Expert-level API */ int getBitsPerEntry() { @@ -220,7 +220,7 @@ int getBitsPerEntry() { * Returns the cached fingerprint mask. This is simply a mask for the * first bitsPerEntry bits, used by {@link CuckooFilter#fingerprint(int, int, int)} * to generate the fingerprint of a hash - * + *

                      * Expert-level API */ int getFingerprintMask() { @@ -230,7 +230,7 @@ int getFingerprintMask() { /** * Returns an iterator that returns the long[] representation of each bucket. The value * inside each long will be a fingerprint (or 0L, representing empty). - * + *

                      * Expert-level API */ Iterator getBuckets() { @@ -267,7 +267,7 @@ boolean mightContain(long hash) { /** * Returns true if the bucket or its alternate bucket contains the fingerprint. - * + *

                      * Expert-level API, use {@link CuckooFilter#mightContain(long)} to check if * a value is in the filter. */ @@ -307,7 +307,7 @@ boolean add(long hash) { /** * Attempts to merge the fingerprint into the specified bucket or its alternate bucket. * Returns true if the insertion was successful, false if the filter is saturated. - * + *

                      * Expert-level API, use {@link CuckooFilter#add(long)} to insert * values into the filter */ @@ -351,7 +351,7 @@ boolean mergeFingerprint(int bucket, int fingerprint) { * Low-level insert method. Attempts to write the fingerprint into an empty entry * at this bucket's position. Returns true if that was successful, false if all entries * were occupied. - * + *

                      * If the fingerprint already exists in one of the entries, it will not duplicate the * fingerprint like the original paper. This means the filter _cannot_ support deletes, * but is not sensitive to "overflowing" buckets with repeated inserts @@ -376,10 +376,10 @@ private boolean tryInsert(int bucket, int fingerprint) { /** * Converts a hash into a bucket index (primary or alternate). - * + *

                      * If the hash is negative, this flips the bits. The hash is then modulo numBuckets * to get the final index. - * + *

                      * Expert-level API */ static int hashToIndex(int hash, int numBuckets) { @@ -388,16 +388,16 @@ static int hashToIndex(int hash, int numBuckets) { /** * Calculates the alternate bucket for a given bucket:fingerprint tuple - * + *

                      * The alternate bucket is the fingerprint multiplied by a mixing constant, * then xor'd against the bucket. This new value is modulo'd against * the buckets via {@link CuckooFilter#hashToIndex(int, int)} to get the final * index. - * + *

                      * Note that the xor makes this operation reversible as long as we have the * fingerprint and current bucket (regardless of if that bucket was the primary * or alternate). - * + *
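Written out, the reversibility argument reads as follows; a standalone sketch assuming a power-of-two bucket count, with the mixing constant taken from the cited paper rather than from this patch:

    class AltIndexSketch {
        /** Alternate bucket: xor the bucket with the mixed fingerprint, then mask to the bucket range. */
        static int altIndex(int bucket, int fingerprint, int numBuckets) {
            // numBuckets must be a power of two for the mask (and the involution) to hold
            return (bucket ^ (fingerprint * 0x5bd1e995)) & (numBuckets - 1);
        }

        public static void main(String[] args) {
            int numBuckets = 1 << 16, bucket = 12345, fingerprint = 0x2a;
            int alternate = altIndex(bucket, fingerprint, numBuckets);
            // Applying the operation again recovers the original bucket:
            System.out.println(altIndex(alternate, fingerprint, numBuckets) == bucket); // true
        }
    }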

                      * Expert-level API */ static int alternateIndex(int bucket, int fingerprint, int numBuckets) { @@ -424,10 +424,10 @@ private int getOffset(int bucket, int position) { /** * Calculates the fingerprint for a given hash. - * + *

                      * The fingerprint is simply the first `bitsPerEntry` number of bits that are non-zero. * If the entire hash is zero, `(int) 1` is used - * + *
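A direct rendering of that fingerprint rule; a standalone sketch (not code from this patch):

    class FingerprintSketch {
        /** First non-zero bitsPerEntry-wide window of the hash; 1 if the hash is all zeros. */
        static int fingerprint(int hash, int bitsPerEntry) {
            int mask = (1 << bitsPerEntry) - 1;
            for (int offset = 0; offset + bitsPerEntry <= 32; offset += bitsPerEntry) {
                int candidate = (hash >>> offset) & mask;
                if (candidate != 0) {
                    return candidate;
                }
            }
            return 1; // entire hash was zero
        }

        public static void main(String[] args) {
            System.out.println(fingerprint(0b1010_0000, 4)); // 10: first non-zero 4-bit window
            System.out.println(fingerprint(0, 8));           // 1: fallback for an all-zero hash
        }
    }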

                      * Expert-level API */ static int fingerprint(int hash, int bitsPerEntry, int fingerprintMask) { @@ -501,7 +501,7 @@ private double getLoadFactor(int b) { * Calculates the optimal number of buckets for this filter. The xor used in the bucketing * algorithm requires this to be a power of two, so the optimal number of buckets will * be rounded to the next largest power of two where applicable. - * + *

                      * TODO: there are schemes to avoid powers of two, might want to investigate those */ private int getNumBuckets(long capacity, double loadFactor, int b) { diff --git a/server/src/main/java/org/opensearch/common/util/SetBackedScalingCuckooFilter.java b/server/src/main/java/org/opensearch/common/util/SetBackedScalingCuckooFilter.java index e99eb751babe8..a635160844159 100644 --- a/server/src/main/java/org/opensearch/common/util/SetBackedScalingCuckooFilter.java +++ b/server/src/main/java/org/opensearch/common/util/SetBackedScalingCuckooFilter.java @@ -75,7 +75,7 @@ public class SetBackedScalingCuckooFilter implements Writeable { * This set is used to track the insertions before we convert over to an approximate * filter. This gives us 100% accuracy for small cardinalities. This will be null * if isSetMode = false; - * + *

                      * package-private for testing */ Set hashes; @@ -178,7 +178,7 @@ public void writeTo(StreamOutput out) throws IOException { /** * Registers a circuit breaker with the datastructure. - * + *

                      * CuckooFilter's can "saturate" and refuse to accept any new values. When this happens, * the datastructure scales by adding a new filter. This new filter's bytes will be tracked * in the registered breaker when configured. diff --git a/server/src/main/java/org/opensearch/common/util/TokenBucket.java b/server/src/main/java/org/opensearch/common/util/TokenBucket.java index d2e7e836bf07f..a9ebb86eed8a2 100644 --- a/server/src/main/java/org/opensearch/common/util/TokenBucket.java +++ b/server/src/main/java/org/opensearch/common/util/TokenBucket.java @@ -20,7 +20,7 @@ public class TokenBucket { /** * Defines a monotonically increasing counter. - * + *

                      * Usage examples: * 1. clock = System::nanoTime can be used to perform rate-limiting per unit time * 2. clock = AtomicLong::get can be used to perform rate-limiting per unit number of operations diff --git a/server/src/main/java/org/opensearch/common/util/URIPattern.java b/server/src/main/java/org/opensearch/common/util/URIPattern.java index a3c385e5ea660..49e4b53e20740 100644 --- a/server/src/main/java/org/opensearch/common/util/URIPattern.java +++ b/server/src/main/java/org/opensearch/common/util/URIPattern.java @@ -39,9 +39,9 @@ /** * URI Pattern matcher - * + *
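The two clock choices sketched concretely (construction of the bucket itself is omitted, since its constructor is not shown in this hunk):

    import java.util.concurrent.atomic.AtomicLong;
    import java.util.function.LongSupplier;

    class TokenBucketClockSketch {
        public static void main(String[] args) {
            // 1. Wall-clock time: rate-limiting per unit time.
            LongSupplier timeClock = System::nanoTime;

            // 2. Operation counter: rate-limiting per unit number of operations.
            AtomicLong operations = new AtomicLong();
            LongSupplier opsClock = operations::get;

            operations.incrementAndGet();
            System.out.println(timeClock.getAsLong() + " " + opsClock.getAsLong());
        }
    }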

                      * The pattern is a URI in which the authority, path, query and fragment can be replaced with simple patterns. - * + *

                      * For example: foobar://*.local/some_path/*?*#* will match all URIs with the scheme foobar in the local domain * on any port, with a path that starts with some_path, and with any query and fragment. * diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/ListenableFuture.java b/server/src/main/java/org/opensearch/common/util/concurrent/ListenableFuture.java index b1f4714a90e8e..4357254176358 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/ListenableFuture.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/ListenableFuture.java @@ -73,7 +73,7 @@ public void addListener(ActionListener listener, ExecutorService executor) { * notified of a response or exception in a runnable submitted to the ExecutorService provided. * If the future has completed, the listener will be notified immediately without forking to * a different thread. - * + *
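A standalone sketch of the wildcard matching the URIPattern example describes, as a pure-JDK regex translation (not the class from this patch):

    import java.util.regex.Pattern;

    class GlobMatchSketch {
        /** Translates '*' wildcards to '.*', quoting every literal segment. */
        static boolean globMatch(String pattern, String value) {
            String[] literals = pattern.split("\\*", -1);
            StringBuilder regex = new StringBuilder();
            for (int i = 0; i < literals.length; i++) {
                if (i > 0) {
                    regex.append(".*");
                }
                regex.append(Pattern.quote(literals[i]));
            }
            return value.matches(regex.toString());
        }

        public static void main(String[] args) {
            System.out.println(globMatch("foobar://*.local/some_path/*?*#*",
                "foobar://node1.local/some_path/x?y=1#top")); // true
        }
    }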

                      * It will apply the provided ThreadContext (if not null) when executing the listening. */ public void addListener(ActionListener listener, ExecutorService executor, ThreadContext threadContext) { diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/PrioritizedOpenSearchThreadPoolExecutor.java b/server/src/main/java/org/opensearch/common/util/concurrent/PrioritizedOpenSearchThreadPoolExecutor.java index b4673d9534922..95df4486b9d7b 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/PrioritizedOpenSearchThreadPoolExecutor.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/PrioritizedOpenSearchThreadPoolExecutor.java @@ -118,8 +118,9 @@ private void addPending(List runnables, List pending, boolean TieBreakingPrioritizedRunnable t = (TieBreakingPrioritizedRunnable) runnable; Runnable innerRunnable = t.runnable; if (innerRunnable != null) { - /** innerRunnable can be null if task is finished but not removed from executor yet, - * see {@link TieBreakingPrioritizedRunnable#run} and {@link TieBreakingPrioritizedRunnable#runAndClean} + /* + innerRunnable can be null if task is finished but not removed from executor yet, + see {@link TieBreakingPrioritizedRunnable#run} and {@link TieBreakingPrioritizedRunnable#runAndClean} */ pending.add(new Pending(super.unwrap(innerRunnable), t.priority(), t.insertionOrder, executing)); } diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java index 4888d25e4a640..fc2e4217bae79 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java @@ -143,10 +143,10 @@ public void unregisterThreadContextStatePropagator(final ThreadContextStatePropa */ public StoredContext stashContext() { final ThreadContextStruct context = threadLocal.get(); - /** - * X-Opaque-ID should be preserved in a threadContext in order to propagate this across threads. - * This is needed so the DeprecationLogger in another thread can see the value of X-Opaque-ID provided by a user. - * Otherwise when context is stash, it should be empty. + /* + X-Opaque-ID should be preserved in a threadContext in order to propagate this across threads. + This is needed so the DeprecationLogger in another thread can see the value of X-Opaque-ID provided by a user. + Otherwise when context is stash, it should be empty. */ ThreadContextStruct threadContextStruct = DEFAULT_CONTEXT.putPersistent(context.persistentHeaders); diff --git a/server/src/main/java/org/opensearch/common/xcontent/LoggingDeprecationHandler.java b/server/src/main/java/org/opensearch/common/xcontent/LoggingDeprecationHandler.java index 06d139fa93195..05fc968737394 100644 --- a/server/src/main/java/org/opensearch/common/xcontent/LoggingDeprecationHandler.java +++ b/server/src/main/java/org/opensearch/common/xcontent/LoggingDeprecationHandler.java @@ -54,7 +54,7 @@ public class LoggingDeprecationHandler implements DeprecationHandler { public static final LoggingDeprecationHandler INSTANCE = new LoggingDeprecationHandler(); /** * The logger to which to send deprecation messages. - * + *

                      * This uses ParseField's logger because that is the logger that * we have been using for many releases for deprecated fields. * Changing that will require some research to make super duper diff --git a/server/src/main/java/org/opensearch/common/xcontent/XContentHelper.java b/server/src/main/java/org/opensearch/common/xcontent/XContentHelper.java index 798a58551457f..17bb0a1de267b 100644 --- a/server/src/main/java/org/opensearch/common/xcontent/XContentHelper.java +++ b/server/src/main/java/org/opensearch/common/xcontent/XContentHelper.java @@ -494,7 +494,7 @@ public static BytesReference toXContent(ToXContent toXContent, XContentType xCon /** * Returns the contents of an object as an unparsed BytesReference - * + *

                      * This is useful for things like mappings where we're copying bytes around but don't * actually need to parse their contents, and so avoids building large maps of maps * unnecessarily diff --git a/server/src/main/java/org/opensearch/common/xcontent/support/XContentMapValues.java b/server/src/main/java/org/opensearch/common/xcontent/support/XContentMapValues.java index adfa871cbfcbe..a87edbb949d39 100644 --- a/server/src/main/java/org/opensearch/common/xcontent/support/XContentMapValues.java +++ b/server/src/main/java/org/opensearch/common/xcontent/support/XContentMapValues.java @@ -117,12 +117,11 @@ private static void extractRawValues(List values, List part, String[] pa /** * For the provided path, return its value in the xContent map. - * + *

                      * Note that in contrast with {@link XContentMapValues#extractRawValues}, array and object values * can be returned. * * @param path the value's path in the map. - * * @return the value associated with the path in the map or 'null' if the path does not exist. */ public static Object extractValue(String path, Map map) { @@ -138,7 +137,7 @@ public static Object extractValue(Map map, String... pathElements) { /** * For the provided path, return its value in the xContent map. - * + *

                      * Note that in contrast with {@link XContentMapValues#extractRawValues}, array and object values * can be returned. * @@ -197,13 +196,13 @@ private static Object extractValue(String[] pathElements, int index, Object curr * Only keep properties in {@code map} that match the {@code includes} but * not the {@code excludes}. An empty list of includes is interpreted as a * wildcard while an empty list of excludes does not match anything. - * + *

                      * If a property matches both an include and an exclude, then the exclude * wins. - * + *

                      * If an object matches, then any of its sub properties are automatically * considered as matching as well, both for includes and excludes. - * + *

                      * Dots in field names are treated as sub objects. So for instance if a * document contains {@code a.b} as a property and {@code a} is an include, * then {@code a.b} will be kept in the filtered map. @@ -555,7 +554,7 @@ public static Map nodeMapValue(Object node, String desc) { /** * Returns an array of string values from a node value. - * + *
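An illustrative call applying those include/exclude rules (the filter method's exact signature is an assumption, not shown in this hunk):

    import java.util.Map;

    import org.opensearch.common.xcontent.support.XContentMapValues;

    class SourceFilterSketch {
        public static void main(String[] args) {
            Map<String, Object> source = Map.of("a", Map.of("b", 1, "secret", 2), "c", 3);
            // "a" is included (its sub-properties match automatically), but the exclude wins for "a.secret".
            Map<String, Object> filtered = XContentMapValues.filter(source, new String[] { "a" }, new String[] { "a.secret" });
            System.out.println(filtered); // {a={b=1}}
        }
    }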

                      * If the node represents an array the corresponding array of strings is returned. * Otherwise the node is treated as a comma-separated string. */ diff --git a/server/src/main/java/org/opensearch/discovery/FileBasedSeedHostsProvider.java b/server/src/main/java/org/opensearch/discovery/FileBasedSeedHostsProvider.java index 3159733336057..b663227978e8f 100644 --- a/server/src/main/java/org/opensearch/discovery/FileBasedSeedHostsProvider.java +++ b/server/src/main/java/org/opensearch/discovery/FileBasedSeedHostsProvider.java @@ -48,12 +48,12 @@ /** * An implementation of {@link SeedHostsProvider} that reads hosts/ports * from {@link #UNICAST_HOSTS_FILE}. - * + *

                      * Each host/port that is part of the discovery process must be listed on * a separate line. If the port is left off an entry, we default to the * first port in the {@code transport.port} range. * An example unicast hosts file could read: - * + *

                      * 67.81.244.10 * 67.81.244.11:9305 * 67.81.244.15:9400 diff --git a/server/src/main/java/org/opensearch/discovery/SettingsBasedSeedHostsProvider.java b/server/src/main/java/org/opensearch/discovery/SettingsBasedSeedHostsProvider.java index 9785d5b21078e..10185322c2ca6 100644 --- a/server/src/main/java/org/opensearch/discovery/SettingsBasedSeedHostsProvider.java +++ b/server/src/main/java/org/opensearch/discovery/SettingsBasedSeedHostsProvider.java @@ -49,7 +49,7 @@ * An implementation of {@link SeedHostsProvider} that reads hosts/ports * from the "discovery.seed_hosts" node setting. If the port is * left off an entry, we default to the first port in the {@code transport.port} range. - * + *

                      * An example setting might look as follows: * [67.81.244.10, 67.81.244.11:9305, 67.81.244.15:9400] * diff --git a/server/src/main/java/org/opensearch/env/Environment.java b/server/src/main/java/org/opensearch/env/Environment.java index 3b87c756ffdae..5a40e45fb22a6 100644 --- a/server/src/main/java/org/opensearch/env/Environment.java +++ b/server/src/main/java/org/opensearch/env/Environment.java @@ -249,7 +249,7 @@ public Path[] repoFiles() { /** * Resolves the specified location against the list of configured repository roots - * + *

                      * If the specified location doesn't match any of the roots, returns null. */ public Path resolveRepoFile(String location) { @@ -259,7 +259,7 @@ public Path resolveRepoFile(String location) { /** * Checks if the specified URL is pointing to the local file system and if it does, resolves the specified url * against the list of configured repository roots - * + *
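Callers of Environment#resolveRepoFile are expected to handle the documented null; an illustrative guard (the location string and message are hypothetical):

    import java.nio.file.Path;

    import org.opensearch.env.Environment;

    class RepoResolveSketch {
        static Path requireRepoPath(Environment environment, String location) {
            Path resolved = environment.resolveRepoFile(location);
            if (resolved == null) {
                // the location is not under any configured repository root
                throw new IllegalArgumentException("location [" + location + "] is not inside any path.repo root");
            }
            return resolved;
        }
    }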

                      * If the specified url doesn't match any of the roots, returns null. */ public URL resolveRepoURL(URL url) { diff --git a/server/src/main/java/org/opensearch/env/NodeEnvironment.java b/server/src/main/java/org/opensearch/env/NodeEnvironment.java index 3c5ab5ba98875..40c5722308b79 100644 --- a/server/src/main/java/org/opensearch/env/NodeEnvironment.java +++ b/server/src/main/java/org/opensearch/env/NodeEnvironment.java @@ -705,7 +705,7 @@ public List lockAllForIndex( * write operation on a shard's data directory like deleting files, creating a new index writer * or recovering from a different shard instance into it. If the shard lock can not be acquired * a {@link ShardLockObtainFailedException} is thrown. - * + *

                      * Note: this method will return immediately if the lock can't be acquired. * * @param id the shard ID to lock @@ -778,7 +778,7 @@ public interface ShardLocker { /** * Returns all currently locked shards. - * + *

                      * Note: the shard ids returned do not contain a valid Index UUID */ public Set lockedShards() { diff --git a/server/src/main/java/org/opensearch/extensions/ExtensionReader.java b/server/src/main/java/org/opensearch/extensions/ExtensionReader.java index 4556d6537ffdc..fc2ba817ace5f 100644 --- a/server/src/main/java/org/opensearch/extensions/ExtensionReader.java +++ b/server/src/main/java/org/opensearch/extensions/ExtensionReader.java @@ -17,7 +17,7 @@ * a category class used to identify the reader defined within the JVM that the extension is running on. * Additionally, this method takes in the extension's corresponding DiscoveryNode and a byte array (context) that the * extension's reader will be applied to. - * + *

                      * By convention the extensions' reader is a constructor that takes StreamInput as an argument for most classes and a static method for things like enums. * Classes will implement this via a constructor (or a static method in the case of enumerations); it should * look like: diff --git a/server/src/main/java/org/opensearch/gateway/BaseGatewayShardAllocator.java b/server/src/main/java/org/opensearch/gateway/BaseGatewayShardAllocator.java index 59ef894958cbe..853fe03904c53 100644 --- a/server/src/main/java/org/opensearch/gateway/BaseGatewayShardAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/BaseGatewayShardAllocator.java @@ -50,7 +50,7 @@ /** * An abstract class that implements basic functionality for allocating * shards to nodes based on shard copies that already exist in the cluster. - * + *

                      * Individual implementations of this class are responsible for providing * the logic to determine to which nodes (if any) those shards are allocated. * diff --git a/server/src/main/java/org/opensearch/gateway/DanglingIndicesState.java b/server/src/main/java/org/opensearch/gateway/DanglingIndicesState.java index e7c1ba01e7920..48479691689e5 100644 --- a/server/src/main/java/org/opensearch/gateway/DanglingIndicesState.java +++ b/server/src/main/java/org/opensearch/gateway/DanglingIndicesState.java @@ -244,7 +244,7 @@ List filterDanglingIndices(Metadata metadata, Map * Importing dangling indices with aliases is dangerous; it could, for instance, result in an inability to write to an existing alias if it * previously had only one index with any is_write_index indication. */ diff --git a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java index 6b26af148b2ea..d3e7a0c482ee2 100644 --- a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java +++ b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java @@ -91,7 +91,7 @@ /** * Loads (and maybe upgrades) cluster metadata at startup, and persistently stores cluster metadata for future restarts. - * + *

                      * When started, ensures that this version is compatible with the state stored on disk, and performs a state upgrade if necessary. Note that the state being * loaded when constructing the instance of this class is not necessarily the state that will be used as {@link ClusterState#metadata()} because it might be * stale or incomplete. Cluster-manager-eligible nodes must perform an election to find a complete and non-stale state, and cluster-manager-ineligible nodes diff --git a/server/src/main/java/org/opensearch/gateway/PersistedClusterStateService.java b/server/src/main/java/org/opensearch/gateway/PersistedClusterStateService.java index 8db4736bcdc40..75beb6e29599c 100644 --- a/server/src/main/java/org/opensearch/gateway/PersistedClusterStateService.java +++ b/server/src/main/java/org/opensearch/gateway/PersistedClusterStateService.java @@ -111,16 +111,16 @@ * Stores cluster metadata in a bare Lucene index (per data path) split across a number of documents. This is used by cluster-manager-eligible nodes * to record the last-accepted cluster state during publication. The metadata is written incrementally where possible, leaving alone any * documents that have not changed. The index has the following fields: - * + *

                      * +------------------------------+-----------------------------+----------------------------------------------+ * | "type" (string field) | "index_uuid" (string field) | "data" (stored binary field in SMILE format) | * +------------------------------+-----------------------------+----------------------------------------------+ * | GLOBAL_TYPE_NAME == "global" | (omitted) | Global metadata | * | INDEX_TYPE_NAME == "index" | Index UUID | Index metadata | * +------------------------------+-----------------------------+----------------------------------------------+ - * + *

                      * Additionally each commit has the following user data: - * + *

                      * +---------------------------+-------------------------+-------------------------------------------------------------------------------+ * | Key symbol | Key literal | Value | * +---------------------------+-------------------------+-------------------------------------------------------------------------------+ @@ -129,7 +129,7 @@ * | NODE_ID_KEY | "node_id" | The (persistent) ID of the node that wrote this metadata | * | NODE_VERSION_KEY | "node_version" | The (ID of the) version of the node that wrote this metadata | * +---------------------------+-------------------------+-------------------------------------------------------------------------------+ - * + *

                      * (the last-accepted term is recorded in Metadata → CoordinationMetadata so does not need repeating here) * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/gateway/PrimaryShardAllocator.java b/server/src/main/java/org/opensearch/gateway/PrimaryShardAllocator.java index 4dc9396751fc9..2807be00feeaa 100644 --- a/server/src/main/java/org/opensearch/gateway/PrimaryShardAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/PrimaryShardAllocator.java @@ -69,7 +69,7 @@ * that holds a copy of the shard. The shard metadata from each node is compared against the * set of valid allocation IDs and for all valid shard copies (if any), the primary shard allocator * executes the allocation deciders to choose a copy to assign the primary shard to. - * + *

                      * Note that the PrimaryShardAllocator does *not* allocate primaries on index creation * (see {@link org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator}), * nor does it allocate primaries when a primary shard failed and there is a valid replica @@ -386,11 +386,11 @@ protected static NodeShardsResult buildNodeShardsResult( } } - /** - * Orders the active shards copies based on below comparators - * 1. No store exception i.e. shard copy is readable - * 2. Prefer previous primary shard - * 3. Prefer shard copy with the highest replication checkpoint. It is NO-OP for doc rep enabled indices. + /* + Orders the active shards copies based on below comparators + 1. No store exception i.e. shard copy is readable + 2. Prefer previous primary shard + 3. Prefer shard copy with the highest replication checkpoint. It is NO-OP for doc rep enabled indices. */ final Comparator comparator; // allocation preference if (matchAnyShard) { diff --git a/server/src/main/java/org/opensearch/gateway/ReplicaShardAllocator.java b/server/src/main/java/org/opensearch/gateway/ReplicaShardAllocator.java index a6c9319b6dc54..f530052c5bcd1 100644 --- a/server/src/main/java/org/opensearch/gateway/ReplicaShardAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/ReplicaShardAllocator.java @@ -295,7 +295,7 @@ public AllocateUnassignedDecision makeAllocationDecision( /** * Determines if the shard can be allocated on at least one node based on the allocation deciders. - * + *

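The three-step preference in the comparator comment above chains naturally as a java.util.Comparator. This is a sketch under assumptions: ShardCopy and its accessors are hypothetical stand-ins for the per-node store results the real allocator compares.

    import java.util.Comparator;

    final class ShardCopyOrderingSketch {
        interface ShardCopy {
            boolean hasStoreException();   // 1. readable copies sort first
            boolean isPreviousPrimary();   // 2. previous primary preferred
            long replicationCheckpoint();  // 3. highest checkpoint preferred
        }

        static final Comparator<ShardCopy> PREFERENCE = Comparator
            .comparing(ShardCopy::hasStoreException)  // false (no exception) first
            .thenComparing(ShardCopy::isPreviousPrimary, Comparator.reverseOrder())
            .thenComparing(Comparator.comparingLong(ShardCopy::replicationCheckpoint).reversed());
    }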
                      * Returns the best allocation decision for allocating the shard on any node (i.e. YES if at least one * node decided YES, THROTTLE if at least one node decided THROTTLE, and NO if none of the nodes decided * YES or THROTTLE). If in explain mode, also returns the node-level explanations as the second element diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java index 164611816e1dc..4a8a0618ffa60 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java @@ -735,10 +735,11 @@ private boolean isInvalidClusterUUID(ClusterMetadataManifest manifest) { */ private List getManifestFileNames(String clusterName, String clusterUUID, int limit) throws IllegalStateException { try { - /** - * {@link BlobContainer#listBlobsByPrefixInSortedOrder} will list the latest manifest file first - * as the manifest file name generated via {@link RemoteClusterStateService#getManifestFileName} ensures - * when sorted in LEXICOGRAPHIC order the latest uploaded manifest file comes on top. + + /* + {@link BlobContainer#listBlobsByPrefixInSortedOrder} will list the latest manifest file first + as the manifest file name generated via {@link RemoteClusterStateService#getManifestFileName} ensures + when sorted in LEXICOGRAPHIC order the latest uploaded manifest file comes on top. */ return manifestContainer(clusterName, clusterUUID).listBlobsByPrefixInSortedOrder( MANIFEST_FILE_PREFIX + DELIMITER, diff --git a/server/src/main/java/org/opensearch/http/CorsHandler.java b/server/src/main/java/org/opensearch/http/CorsHandler.java index 862c50ae6ac1f..464ced184d10e 100644 --- a/server/src/main/java/org/opensearch/http/CorsHandler.java +++ b/server/src/main/java/org/opensearch/http/CorsHandler.java @@ -81,7 +81,7 @@ * This file is forked from the https://netty.io project. In particular it combines the following three * files: io.netty.handler.codec.http.cors.CorsHandler, io.netty.handler.codec.http.cors.CorsConfig, and * io.netty.handler.codec.http.cors.CorsConfigBuilder. - * + *

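The LEXICOGRAPHIC ordering that the RemoteClusterStateService comment above relies on is usually achieved by embedding an inverted, zero-padded counter in the blob name, so newer uploads sort first. A hedged sketch; the name layout here is illustrative and not the real getManifestFileName format:

    final class ManifestNameSketch {
        static String manifestFileName(long term, long version) {
            long inverted = Long.MAX_VALUE - System.currentTimeMillis();
            // Zero-padding keeps lexicographic order aligned with numeric order,
            // so the most recently written manifest lists first.
            return String.format("manifest__%d__%d__%020d", term, version, inverted);
        }
    }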
                      * It modifies the original netty code to operate on OpenSearch http request/response abstractions. * Additionally, it removes CORS features that are not used by OpenSearch. * diff --git a/server/src/main/java/org/opensearch/identity/noop/NoopIdentityPlugin.java b/server/src/main/java/org/opensearch/identity/noop/NoopIdentityPlugin.java index c2367383df595..090b1f1d025e0 100644 --- a/server/src/main/java/org/opensearch/identity/noop/NoopIdentityPlugin.java +++ b/server/src/main/java/org/opensearch/identity/noop/NoopIdentityPlugin.java @@ -14,7 +14,7 @@ /** * Implementation of identity plugin that does not enforce authentication or authorization - * + *

                      * This class and related classes in this package will not return nulls or fail access checks * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/identity/noop/NoopSubject.java b/server/src/main/java/org/opensearch/identity/noop/NoopSubject.java index 424a10204aa19..964a218db3cf5 100644 --- a/server/src/main/java/org/opensearch/identity/noop/NoopSubject.java +++ b/server/src/main/java/org/opensearch/identity/noop/NoopSubject.java @@ -17,7 +17,7 @@ /** * Implementation of subject that is always authenticated - * + *

                      * This class and related classes in this package will not return nulls or fail permissions checks * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/identity/tokens/RestTokenExtractor.java b/server/src/main/java/org/opensearch/identity/tokens/RestTokenExtractor.java index 193966001f44c..4bd3ebdded588 100644 --- a/server/src/main/java/org/opensearch/identity/tokens/RestTokenExtractor.java +++ b/server/src/main/java/org/opensearch/identity/tokens/RestTokenExtractor.java @@ -26,7 +26,7 @@ public class RestTokenExtractor { /** * Given a rest request, it will extract the authentication token - * + *

                      * If no token was found, returns null. */ public static AuthToken extractToken(final RestRequest request) { diff --git a/server/src/main/java/org/opensearch/index/IndexService.java b/server/src/main/java/org/opensearch/index/IndexService.java index a53dbe246fa44..84e8e2f41aaf1 100644 --- a/server/src/main/java/org/opensearch/index/IndexService.java +++ b/server/src/main/java/org/opensearch/index/IndexService.java @@ -696,7 +696,7 @@ public IndexSettings getIndexSettings() { /** * Creates a new QueryShardContext. - * + *

                      * Passing a {@code null} {@link IndexSearcher} will return a valid context, however it won't be able to make * {@link IndexReader}-specific optimizations, such as rewriting containing range queries. */ @@ -706,7 +706,7 @@ public QueryShardContext newQueryShardContext(int shardId, IndexSearcher searche /** * Creates a new QueryShardContext. - * + *

                      * Passing a {@code null} {@link IndexSearcher} will return a valid context, however it won't be able to make * {@link IndexReader}-specific optimizations, such as rewriting containing range queries. */ diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index ce6c1a5ad6284..e90e9259f6a5c 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -550,7 +550,7 @@ public static IndexMergePolicy fromString(String text) { /** * This setting controls if unreferenced files will be cleaned up in case segment merge fails due to disk full. - * + *

                      * Defaults to true which means unreferenced files will be cleaned up in case segment merge fails. */ public static final Setting INDEX_UNREFERENCED_FILE_CLEANUP = Setting.boolSetting( @@ -566,7 +566,7 @@ public static IndexMergePolicy fromString(String text) { * documents) on the grounds that a file-based peer recovery may copy all of the documents in the shard over to the new peer, but is * significantly faster than replaying the missing operations on the peer, so once a peer falls far enough behind the primary it makes * more sense to copy all the data over again instead of replaying history. - * + *

                      * Defaults to retaining history for up to 10% of the documents in the shard. This can only be changed in tests, since this setting is * intentionally unregistered. */ diff --git a/server/src/main/java/org/opensearch/index/IndexSortConfig.java b/server/src/main/java/org/opensearch/index/IndexSortConfig.java index 763ed34a1e6a6..9edb268a5126c 100644 --- a/server/src/main/java/org/opensearch/index/IndexSortConfig.java +++ b/server/src/main/java/org/opensearch/index/IndexSortConfig.java @@ -53,7 +53,7 @@ /** * Holds all the information that is used to build the sort order of an index. - * + *

                      * The index sort settings are final and can be defined only at index creation. * These settings are divided into four lists that are merged during the initialization of this class: *

                        diff --git a/server/src/main/java/org/opensearch/index/MergeSchedulerConfig.java b/server/src/main/java/org/opensearch/index/MergeSchedulerConfig.java index 9e170b448d641..b2b7781a20d26 100644 --- a/server/src/main/java/org/opensearch/index/MergeSchedulerConfig.java +++ b/server/src/main/java/org/opensearch/index/MergeSchedulerConfig.java @@ -48,14 +48,14 @@ * *
                          *
                        • index.merge.scheduler.max_thread_count: - * + *

                          * The maximum number of threads that may be merging at once. Defaults to * Math.max(1, Math.min(4, {@link OpenSearchExecutors#allocatedProcessors(Settings)} / 2)) * which works well for a good solid-state-disk (SSD). If your index is on * spinning platter drives instead, decrease this to 1. * *

                        • index.merge.scheduler.auto_throttle: - * + *

                          * If this is true (the default), then the merge scheduler will rate-limit IO * (writes) for merges to an adaptive value depending on how many merges are * requested over time. An application with a low indexing rate that diff --git a/server/src/main/java/org/opensearch/index/ShardIndexingPressure.java b/server/src/main/java/org/opensearch/index/ShardIndexingPressure.java index 07768c6769b71..a6135186fb5ff 100644 --- a/server/src/main/java/org/opensearch/index/ShardIndexingPressure.java +++ b/server/src/main/java/org/opensearch/index/ShardIndexingPressure.java @@ -34,7 +34,7 @@ * Interfaces return a Releasable which, when triggered, will release the acquired accounting token values and also * perform necessary actions such as throughput evaluation once the request completes. * Consumers of these interfaces are expected to trigger close on the releasable reliably, for consistency. - * + *

                          * Overall ShardIndexingPressure provides: * 1. Memory Accounting at shard level. This can be enabled/disabled based on a dynamic setting. * 2. Memory Accounting at Node level. Tracking is done using the IndexingPressure artefacts to support seamless feature toggling. diff --git a/server/src/main/java/org/opensearch/index/ShardIndexingPressureMemoryManager.java b/server/src/main/java/org/opensearch/index/ShardIndexingPressureMemoryManager.java index dea3cc8970cb5..e5c1af2e9c9f0 100644 --- a/server/src/main/java/org/opensearch/index/ShardIndexingPressureMemoryManager.java +++ b/server/src/main/java/org/opensearch/index/ShardIndexingPressureMemoryManager.java @@ -30,16 +30,16 @@ /** * The Shard Indexing Pressure Memory Manager is the construct responsible for increasing and decreasing the allocated shard limit * based on incoming requests. A shard limit defines the maximum memory that a shard can occupy in the heap for request objects. - * + *

                          * Based on the overall memory utilization on the node and current traffic needs, shard limits will be modified: - * + *

                          * 1. If the limits assigned to a shard are breached (Primary Parameter) while the node level overall occupancy across all shards * is not greater than primary_parameter.node.soft_limit, MemoryManager will increase the shard limits without any deeper evaluation. * 2. If the limits assigned to the shard are breached (Primary Parameter) and the node level overall occupancy across all shards * is greater than primary_parameter.node.soft_limit, then MemoryManager will evaluate deeper parameters for shards to identify any * issues, such as throughput degradation (Secondary Parameter - 1) and time since last request was successful (Secondary Parameter - 2). * This helps detect any duress state with the shard requesting more memory. - * + *

                          * Secondary Parameters covered above: * 1. ThroughputDegradationLimitsBreached - When the moving window throughput average has increased by a factor compared to * the historical throughput average. If the factor by which it has increased is greater than the degradation limit threshold, this @@ -47,7 +47,7 @@ * 2. LastSuccessfulRequestDurationLimitsBreached - When the time since the last successful request completed is greater than the max * timeout threshold value, while there are a number of outstanding requests greater than the max outstanding requests, then this parameter * is considered to be breached. - * + *

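Put together, the primary/secondary evaluation described above amounts to a small decision rule. The sketch below is an assumption-laden paraphrase (names and signature invented), not the real ShardIndexingPressureMemoryManager logic:

    final class ShardLimitDecisionSketch {
        static boolean shouldIncreaseShardLimit(
            boolean shardLimitBreached,   // primary parameter
            double nodeOccupancy,
            double nodeSoftLimit,         // primary_parameter.node.soft_limit
            boolean throughputDegraded,   // secondary parameter 1
            boolean lastRequestTimedOut   // secondary parameter 2
        ) {
            if (!shardLimitBreached) return false;
            if (nodeOccupancy <= nodeSoftLimit) return true; // no deeper evaluation
            // Node under pressure: expand only if the deeper checks find no duress.
            return !(throughputDegraded || lastRequestTimedOut);
        }
    }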
                          * MemoryManager attempts to increase or decrease the shard limits in case the shard utilization goes below operating_factor.lower or * goes above operating_factor.upper of current shard limits. MemoryManager attempts to update the new shard limit such that the new value * remains within the operating_factor.optimal range of current shard utilization. diff --git a/server/src/main/java/org/opensearch/index/ShardIndexingPressureStore.java b/server/src/main/java/org/opensearch/index/ShardIndexingPressureStore.java index b41dd1359394b..9b24d119f24fd 100644 --- a/server/src/main/java/org/opensearch/index/ShardIndexingPressureStore.java +++ b/server/src/main/java/org/opensearch/index/ShardIndexingPressureStore.java @@ -22,24 +22,24 @@ * Shard indexing pressure store acts as a central repository for all the shard-level tracker objects currently being * used at the Node level, for tracking indexing pressure requests. * Store manages the tracker lifecycle, from creation, access, until it is evicted to be collected. - * + *

                          * Trackers are maintained at two levels for access simplicity and better memory management: - * + *

                          * 1. shardIndexingPressureHotStore : As the name suggests, it is the hot store for tracker objects which are currently live i.e. being used * to track an ongoing request. - * + *

                          * 2. shardIndexingPressureColdStore : This acts as the store for all the shard tracking objects which are currently being used * by the framework. In addition to hot trackers, the recently used trackers which, although not currently live, can be used again * in the near future, are also part of this store. To limit any memory implications, this store has an upper limit on the maximum number of * trackers it can hold at any given time, which is a configurable dynamic setting. - * + *

                          * Tracking objects when created are part of both the hot store as well as cold store. However, once the object * is no longer live it is removed from the hot store. Objects in the cold store are evicted once the cold store * reaches its maximum limit. Think of it like a periodic purge when the upper limit is hit. * During a get, if the tracking object is not present in the hot store, a lookup is made into the cold store. If found, the * object is brought into the hot store again, until it remains active. If not present in either store, a fresh * object is instantiated and registered in both the stores for concurrent accesses. - * + *

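The hot/cold lifecycle described above can be pictured with two maps: get-or-create registers in both, release evicts from the hot map only, and the cold map is purged once it outgrows its limit. A minimal sketch with invented names, not the actual store implementation:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.function.Function;

    final class TrackerStoreSketch<T> {
        private final Map<String, T> hotStore = new ConcurrentHashMap<>();
        private final Map<String, T> coldStore = new ConcurrentHashMap<>();
        private final int coldStoreLimit;

        TrackerStoreSketch(int coldStoreLimit) {
            this.coldStoreLimit = coldStoreLimit;
        }

        T getOrCreate(String shardId, Function<String, T> factory) {
            // Hot lookup first; fall back to the cold store; otherwise create
            // fresh. Either way the tracker ends up registered in both stores.
            return hotStore.computeIfAbsent(shardId, id -> coldStore.computeIfAbsent(id, factory));
        }

        void markInactive(String shardId) {
            hotStore.remove(shardId);      // no longer live
            if (coldStore.size() > coldStoreLimit) {
                coldStore.clear();         // periodic purge once the limit is hit
            }
        }
    }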
                          * Note: The implementation of shardIndexingPressureColdStore methods is such that get, * update and evict operations can be abstracted out to support any other strategy such as LRU, if * a need is discovered later. diff --git a/server/src/main/java/org/opensearch/index/ShardIndexingPressureTracker.java b/server/src/main/java/org/opensearch/index/ShardIndexingPressureTracker.java index 7d67b47141ef5..e0edb8260fd0f 100644 --- a/server/src/main/java/org/opensearch/index/ShardIndexingPressureTracker.java +++ b/server/src/main/java/org/opensearch/index/ShardIndexingPressureTracker.java @@ -14,19 +14,19 @@ * This class is responsible for all the tracking that needs to be performed at every Shard Level for Indexing Operations on the node. * Info is maintained at the granularity of three kinds of write operation (tasks) on the node i.e. coordinating, primary and replica. * This is useful in evaluating the shard indexing back-pressure on the node, to throttle requests and also to publish runtime stats. - * + *

                          * There can be four kinds of operation tracking on a node which need to be performed for a shard: * 1. Coordinating Operation : To track all the individual shard bulk requests on the coordinator node. * 2. Primary Operation : To track all the individual shard bulk requests on the primary node. * 3. Replica Operation : To track all the individual shard bulk requests on the replica node. * 4. Common Operation : To track values applicable across the specific shard role. - * + *

                          * ShardIndexingPressureTracker therefore provides the construct to track all the write requests targeted for a ShardId on the node, * across all possible transport-write-actions i.e. Coordinator, Primary and Replica. * Tracker is uniquely identified against a Shard-Id on the node. Currently the knowledge of shard roles (such as primary vs replica) * is not explicit to the tracker, and it is able to track different values simultaneously based on the interaction hooks of the * operation type i.e. write-action layers. - * + *

                          * There is room for introducing more unique identity to the trackers based on Shard-Role or Shard-Allocation-Id, but that will also * increase the complexity of handling shard-listener events and handling other race scenarios such as request-draining etc. * To prefer simplicity we have modelled it by keeping explicit fields for different operation tracking, while tracker by itself is diff --git a/server/src/main/java/org/opensearch/index/VersionType.java b/server/src/main/java/org/opensearch/index/VersionType.java index 3ce7f600a6a5b..8aa1fbd1b81ac 100644 --- a/server/src/main/java/org/opensearch/index/VersionType.java +++ b/server/src/main/java/org/opensearch/index/VersionType.java @@ -244,7 +244,7 @@ public byte getValue() { /** * Returns a human readable explanation for a version conflict on write. - * + *

                          * Note that this method is only called if {@link #isVersionConflictForWrites(long, long, boolean)} returns true; * * @param currentVersion the current version for the document @@ -265,7 +265,7 @@ public byte getValue() { /** * Returns a human readable explanation for a version conflict on read. - * + *

                          * Note that this method is only called if {@link #isVersionConflictForReads(long, long)} returns true; * * @param currentVersion the current version for the document diff --git a/server/src/main/java/org/opensearch/index/analysis/AnalysisRegistry.java b/server/src/main/java/org/opensearch/index/analysis/AnalysisRegistry.java index cfdf416b2d533..3b1f6af8a0030 100644 --- a/server/src/main/java/org/opensearch/index/analysis/AnalysisRegistry.java +++ b/server/src/main/java/org/opensearch/index/analysis/AnalysisRegistry.java @@ -244,7 +244,7 @@ public IndexAnalyzers build(IndexSettings indexSettings) throws IOException { /** * Creates a custom analyzer from a collection of {@link NameOrDefinition} specifications for each component - * + *

                          * Callers are responsible for closing the returned Analyzer */ public NamedAnalyzer buildCustomAnalyzer( diff --git a/server/src/main/java/org/opensearch/index/analysis/NormalizingCharFilterFactory.java b/server/src/main/java/org/opensearch/index/analysis/NormalizingCharFilterFactory.java index ab8d23339029c..30fe31105e1d9 100644 --- a/server/src/main/java/org/opensearch/index/analysis/NormalizingCharFilterFactory.java +++ b/server/src/main/java/org/opensearch/index/analysis/NormalizingCharFilterFactory.java @@ -36,7 +36,7 @@ /** * A CharFilterFactory that also supports normalization - * + *

                          * The default implementation of {@link #normalize(Reader)} delegates to * {@link #create(Reader)} * diff --git a/server/src/main/java/org/opensearch/index/analysis/NormalizingTokenFilterFactory.java b/server/src/main/java/org/opensearch/index/analysis/NormalizingTokenFilterFactory.java index be761aee0d36c..2ed621cdd22b1 100644 --- a/server/src/main/java/org/opensearch/index/analysis/NormalizingTokenFilterFactory.java +++ b/server/src/main/java/org/opensearch/index/analysis/NormalizingTokenFilterFactory.java @@ -36,7 +36,7 @@ /** * A TokenFilterFactory that may be used for normalization - * + *

                          * The default implementation delegates {@link #normalize(TokenStream)} to * {@link #create(TokenStream)}}. * diff --git a/server/src/main/java/org/opensearch/index/analysis/PreBuiltAnalyzerProviderFactory.java b/server/src/main/java/org/opensearch/index/analysis/PreBuiltAnalyzerProviderFactory.java index a65e1898bea0d..8719d127781e0 100644 --- a/server/src/main/java/org/opensearch/index/analysis/PreBuiltAnalyzerProviderFactory.java +++ b/server/src/main/java/org/opensearch/index/analysis/PreBuiltAnalyzerProviderFactory.java @@ -102,7 +102,7 @@ public void close() throws IOException { /** * A special cache that closes the gap between PreBuiltAnalyzers and PreBuiltAnalyzerProviderFactory. - * + *

                          * This can be removed when all analyzers have been moved away from PreBuiltAnalyzers to * PreBuiltAnalyzerProviderFactory either in server or analysis-common. * diff --git a/server/src/main/java/org/opensearch/index/analysis/ShingleTokenFilterFactory.java b/server/src/main/java/org/opensearch/index/analysis/ShingleTokenFilterFactory.java index e66ae20508dfe..16a1f9a067998 100644 --- a/server/src/main/java/org/opensearch/index/analysis/ShingleTokenFilterFactory.java +++ b/server/src/main/java/org/opensearch/index/analysis/ShingleTokenFilterFactory.java @@ -155,11 +155,11 @@ public TokenStream create(TokenStream tokenStream) { filter.setTokenSeparator(tokenSeparator); filter.setFillerToken(fillerToken); if (outputUnigrams || (minShingleSize != maxShingleSize)) { - /** - * We disable the graph analysis on this token stream - * because it produces shingles of different size. - * Graph analysis on such token stream is useless and dangerous as it may create too many paths - * since shingles of different size are not aligned in terms of positions. + /* + We disable the graph analysis on this token stream + because it produces shingles of different size. + Graph analysis on such token stream is useless and dangerous as it may create too many paths + since shingles of different size are not aligned in terms of positions. */ filter.addAttribute(DisableGraphAttribute.class); } diff --git a/server/src/main/java/org/opensearch/index/analysis/TokenFilterFactory.java b/server/src/main/java/org/opensearch/index/analysis/TokenFilterFactory.java index 1b9d781b177ce..6708db8571b2a 100644 --- a/server/src/main/java/org/opensearch/index/analysis/TokenFilterFactory.java +++ b/server/src/main/java/org/opensearch/index/analysis/TokenFilterFactory.java @@ -51,7 +51,7 @@ public interface TokenFilterFactory { /** * Normalize a tokenStream for use in multi-term queries - * + *

                          * The default implementation is a no-op */ default TokenStream normalize(TokenStream tokenStream) { @@ -86,7 +86,7 @@ default TokenFilterFactory getChainAwareTokenFilterFactory( /** * Return a version of this TokenFilterFactory appropriate for synonym parsing - * + *

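A factory that wants its filter applied to multi-term queries simply routes normalize() through create(), as the TokenFilterFactory javadoc above implies. A sketch (the factory name is invented; LowerCaseFilter is just a convenient example):

    import org.apache.lucene.analysis.LowerCaseFilter;
    import org.apache.lucene.analysis.TokenStream;
    import org.opensearch.index.analysis.TokenFilterFactory;

    final class LowercaseNormalizingFactorySketch implements TokenFilterFactory {
        @Override
        public String name() {
            return "lowercase_sketch";
        }

        @Override
        public TokenStream create(TokenStream in) {
            return new LowerCaseFilter(in);
        }

        @Override
        public TokenStream normalize(TokenStream in) {
            return create(in); // opt in: same filter for multi-term queries
        }
    }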
                          * Filters that should not be applied to synonyms (for example, those that produce * multiple tokens) should throw an exception * diff --git a/server/src/main/java/org/opensearch/index/engine/InternalEngine.java b/server/src/main/java/org/opensearch/index/engine/InternalEngine.java index 3eeceff2253c1..8e1627af274c5 100644 --- a/server/src/main/java/org/opensearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/InternalEngine.java @@ -402,7 +402,7 @@ public CompletionStats completionStats(String... fieldNamePatterns) { * The main purpose for this is that if we have external refreshes happening we don't issue extra * refreshes to clear version map memory etc. this can cause excessive segment creation if heavy indexing * is happening and the refresh interval is low (ie. 1 sec) - * + *

                          * This also prevents segment starvation where an internal reader holds on to old segments literally forever * since no indexing is happening and refreshes are only happening to the external reader manager, while with * this specialized implementation an external refresh will immediately be reflected on the internal reader diff --git a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java index 020e92aba4ce5..f9f0458f45ee8 100644 --- a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java @@ -180,7 +180,7 @@ public synchronized void updateSegments(final SegmentInfos infos) throws IOExcep /** * Persist the latest live SegmentInfos. - * + *

                          * This method creates a commit point from the latest SegmentInfos. * * @throws IOException - When there is an IO error committing the SegmentInfos. diff --git a/server/src/main/java/org/opensearch/index/engine/ReplicaFileTracker.java b/server/src/main/java/org/opensearch/index/engine/ReplicaFileTracker.java index 2e8bd6409c2f6..19454967f9ee3 100644 --- a/server/src/main/java/org/opensearch/index/engine/ReplicaFileTracker.java +++ b/server/src/main/java/org/opensearch/index/engine/ReplicaFileTracker.java @@ -22,7 +22,7 @@ /** * This class is heavily influenced by Lucene's ReplicaFileDeleter class used to keep track of * segment files that should be preserved on replicas between replication events. - * + *

                          * https://github.com/apache/lucene/blob/main/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/ReplicaFileDeleter.java * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/index/fielddata/MultiGeoPointValues.java b/server/src/main/java/org/opensearch/index/fielddata/MultiGeoPointValues.java index 2df4baeb8631b..3090b8e7f5b15 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/MultiGeoPointValues.java +++ b/server/src/main/java/org/opensearch/index/fielddata/MultiGeoPointValues.java @@ -73,7 +73,7 @@ protected MultiGeoPointValues() {} /** * Return the next value associated with the current document. This must not be * called more than {@link #docValueCount()} times. - * + *

                          * Note: the returned {@link GeoPoint} might be shared across invocations. * * @return the next value for the current docID set to {@link #advanceExact(int)}. diff --git a/server/src/main/java/org/opensearch/index/fielddata/ScriptDocValues.java b/server/src/main/java/org/opensearch/index/fielddata/ScriptDocValues.java index a3c3774b250dc..5860cfe8a47c3 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/ScriptDocValues.java +++ b/server/src/main/java/org/opensearch/index/fielddata/ScriptDocValues.java @@ -55,7 +55,7 @@ /** * Script level doc values, the assumption is that any implementation will * implement a {@link Longs#getValue getValue} method. - * + *

                          * Implementations should not internally re-use objects for the values that they * return as a single {@link ScriptDocValues} instance can be reused to return * values form multiple documents. @@ -589,11 +589,11 @@ public BytesRef get(int index) { + "Use doc[].size()==0 to check if a document is missing a field!" ); } - /** - * We need to make a copy here because {@link BinaryScriptDocValues} might reuse the - * returned value and the same instance might be used to - * return values from multiple documents. - **/ + /* + We need to make a copy here because {@link BinaryScriptDocValues} might reuse the + returned value and the same instance might be used to + return values from multiple documents. + */ return values[index].toBytesRef(); } diff --git a/server/src/main/java/org/opensearch/index/fielddata/plain/SortedSetOrdinalsIndexFieldData.java b/server/src/main/java/org/opensearch/index/fielddata/plain/SortedSetOrdinalsIndexFieldData.java index 4f27c9b10f0ee..fe033fa7a3f70 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/plain/SortedSetOrdinalsIndexFieldData.java +++ b/server/src/main/java/org/opensearch/index/fielddata/plain/SortedSetOrdinalsIndexFieldData.java @@ -101,9 +101,9 @@ public SortedSetOrdinalsIndexFieldData( @Override public SortField sortField(@Nullable Object missingValue, MultiValueMode sortMode, Nested nested, boolean reverse) { XFieldComparatorSource source = new BytesRefFieldComparatorSource(this, missingValue, sortMode, nested); - /** - * Check if we can use a simple {@link SortedSetSortField} compatible with index sorting and - * returns a custom sort field otherwise. + /* + Check if we can use a simple {@link SortedSetSortField} compatible with index sorting and + returns a custom sort field otherwise. */ if (nested != null || (sortMode != MultiValueMode.MAX && sortMode != MultiValueMode.MIN) diff --git a/server/src/main/java/org/opensearch/index/mapper/AbstractGeometryFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/AbstractGeometryFieldMapper.java index a038cf178bb03..3b6782b34feea 100644 --- a/server/src/main/java/org/opensearch/index/mapper/AbstractGeometryFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/AbstractGeometryFieldMapper.java @@ -121,7 +121,7 @@ public abstract static class Parser { /** * Given a parsed value and a format string, formats the value into a plain Java object. - * + *

                          * Supported formats include 'geojson' and 'wkt'. The different formats are defined * as subclasses of {@link org.opensearch.common.geo.GeometryFormat}. */ @@ -129,7 +129,7 @@ public abstract static class Parser { /** * Parses the given value, then formats it according to the 'format' string. - * + *

                          * By default, this method simply parses the value using {@link Parser#parse}, then formats * it with {@link Parser#format}. However some {@link Parser} implementations override this * as they can avoid parsing the value if it is already in the right format. diff --git a/server/src/main/java/org/opensearch/index/mapper/ArraySourceValueFetcher.java b/server/src/main/java/org/opensearch/index/mapper/ArraySourceValueFetcher.java index f5dc34ab8ac5d..b3112df86bab6 100644 --- a/server/src/main/java/org/opensearch/index/mapper/ArraySourceValueFetcher.java +++ b/server/src/main/java/org/opensearch/index/mapper/ArraySourceValueFetcher.java @@ -43,7 +43,7 @@ /** * An implementation of {@link ValueFetcher} that knows how to extract values * from the document source. - * + *

                          * This class differs from {@link SourceValueFetcher} in that it directly handles * array values in parsing. Field types should use this class if their corresponding * mapper returns true for {@link FieldMapper#parsesArrayValue()}. diff --git a/server/src/main/java/org/opensearch/index/mapper/CompletionFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/CompletionFieldMapper.java index 90ef3cc1689a4..e1413fd9b4bbe 100644 --- a/server/src/main/java/org/opensearch/index/mapper/CompletionFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/CompletionFieldMapper.java @@ -447,13 +447,13 @@ int getMaxInputLength() { /** * Parses and indexes inputs - * + *

                          * Parsing: * Acceptable format: * "STRING" - interpreted as field value (input) * "ARRAY" - each element can be one of "OBJECT" (see below) * "OBJECT" - { "input": STRING|ARRAY, "weight": STRING|INT, "contexts": ARRAY|OBJECT } - * + *

                          * Indexing: * if context mappings are defined, delegates to {@link ContextMappings#addField(ParseContext.Document, String, String, int, Map)} * else adds inputs as a {@link org.apache.lucene.search.suggest.document.SuggestField} diff --git a/server/src/main/java/org/opensearch/index/mapper/CustomDocValuesField.java b/server/src/main/java/org/opensearch/index/mapper/CustomDocValuesField.java index 2776e7515bbf6..fbb67731f581b 100644 --- a/server/src/main/java/org/opensearch/index/mapper/CustomDocValuesField.java +++ b/server/src/main/java/org/opensearch/index/mapper/CustomDocValuesField.java @@ -42,8 +42,7 @@ import java.io.Reader; /** - * Base class for constructing a custom docvalues type - * + * Base class for constructing a custom docvalues type. * used for binary, geo, and range fields * * @opensearch.api diff --git a/server/src/main/java/org/opensearch/index/mapper/DocumentMapperParser.java b/server/src/main/java/org/opensearch/index/mapper/DocumentMapperParser.java index dbafc2a923cf2..db58c5e02e176 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DocumentMapperParser.java +++ b/server/src/main/java/org/opensearch/index/mapper/DocumentMapperParser.java @@ -197,7 +197,7 @@ private static String getRemainingFields(Map map) { /** * Given an optional type name and mapping definition, returns the type and a normalized form of the mappings. - * + *

                          * The provided mapping definition may or may not contain the type name as the root key in the map. This method * attempts to unwrap the mappings, so that they no longer contain a type name at the root. If no type name can * be found, through either the 'type' parameter or by examining the provided mappings, then an exception will be @@ -205,7 +205,6 @@ private static String getRemainingFields(Map map) { * * @param type An optional type name. * @param root The mapping definition. - * * @return A tuple of the form (type, normalized mappings). */ @SuppressWarnings({ "unchecked" }) diff --git a/server/src/main/java/org/opensearch/index/mapper/DynamicKeyFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/DynamicKeyFieldMapper.java index 94bc4806ba0e0..2e59d86f9119c 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DynamicKeyFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/DynamicKeyFieldMapper.java @@ -43,7 +43,7 @@ * to {@link DynamicKeyFieldMapper#keyedFieldType(String)}, with 'some_key' passed as the * argument. The field mapper is allowed to create a new field type dynamically in order * to handle the search. - * + *

                          * To prevent conflicts between these dynamic sub-keys and multi-fields, any field mappers * implementing this interface should explicitly disallow multi-fields. The constructor makes * sure to pass an empty multi-fields list to help prevent conflicting sub-keys from being diff --git a/server/src/main/java/org/opensearch/index/mapper/DynamicKeyFieldTypeLookup.java b/server/src/main/java/org/opensearch/index/mapper/DynamicKeyFieldTypeLookup.java index 13150ddc50a51..a415078108eb6 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DynamicKeyFieldTypeLookup.java +++ b/server/src/main/java/org/opensearch/index/mapper/DynamicKeyFieldTypeLookup.java @@ -37,11 +37,11 @@ /** * A container that supports looking up field types for 'dynamic key' fields ({@link DynamicKeyFieldMapper}). - * + *

                          * Compared to standard fields, 'dynamic key' fields require special handling. Given a field name of the form * 'path_to_field.path_to_key', the container will dynamically return a new {@link MappedFieldType} that is * suitable for performing searches on the sub-key. - * + *

                          * Note: we anticipate that 'flattened' fields will be the only implementation of {@link DynamicKeyFieldMapper}. * Flattened object fields live in the 'mapper-flattened' module. * diff --git a/server/src/main/java/org/opensearch/index/mapper/FieldAliasMapper.java b/server/src/main/java/org/opensearch/index/mapper/FieldAliasMapper.java index 86c92ec19a2f7..ff9cb61b85571 100644 --- a/server/src/main/java/org/opensearch/index/mapper/FieldAliasMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/FieldAliasMapper.java @@ -43,7 +43,7 @@ /** * A mapper for field aliases. - * + *

                          * A field alias has no concrete field mappings of its own, but instead points to another field by * its path. Once defined, an alias can be used in place of the concrete field name in search requests. * diff --git a/server/src/main/java/org/opensearch/index/mapper/FieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/FieldMapper.java index f8514c86fa418..f6178a8284945 100644 --- a/server/src/main/java/org/opensearch/index/mapper/FieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/FieldMapper.java @@ -305,7 +305,7 @@ public void parse(ParseContext context) throws IOException { /** * Parse the field value and populate the fields on {@link ParseContext#doc()}. - * + *

                          * Implementations of this method should ensure that, on a failure to parse, parser.currentToken() is the * current failing token */ diff --git a/server/src/main/java/org/opensearch/index/mapper/FieldTypeLookup.java b/server/src/main/java/org/opensearch/index/mapper/FieldTypeLookup.java index 8e1b6f2a3c08b..549ab0403e5dc 100644 --- a/server/src/main/java/org/opensearch/index/mapper/FieldTypeLookup.java +++ b/server/src/main/java/org/opensearch/index/mapper/FieldTypeLookup.java @@ -57,7 +57,7 @@ class FieldTypeLookup implements Iterable { * A map from field name to all fields whose content has been copied into it * through copy_to. A field will only be present in the map if some other field * has listed it as a target of copy_to. - * + *

                          * For convenience, the set of copied fields includes the field itself. */ private final Map> fieldToCopiedFields = new HashMap<>(); @@ -133,7 +133,7 @@ public Set simpleMatchToFullName(String pattern) { /** * Given a concrete field name, return its paths in the _source. - * + *

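A sketch of how the copy_to lookup described above could be assembled, with the "includes the field itself" convenience modelled by seeding each entry; the input shape and names are assumptions, not the FieldTypeLookup internals:

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Map;
    import java.util.Set;

    final class CopiedFieldsSketch {
        static Map<String, Set<String>> build(Map<String, List<String>> copyToTargetsBySource) {
            Map<String, Set<String>> fieldToCopiedFields = new HashMap<>();
            copyToTargetsBySource.forEach((source, targets) -> {
                for (String target : targets) {
                    fieldToCopiedFields
                        .computeIfAbsent(target, t -> new HashSet<>(Collections.singleton(t)))
                        .add(source); // the target's values also come from this source field
                }
            });
            return fieldToCopiedFields;
        }
    }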
                          * For most fields, the source path is the same as the field itself. However * there are cases where a field's values are found elsewhere in the _source: * - For a multi-field, the source path is the parent field. diff --git a/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java index db35c3edcc4a8..00b623dddac23 100644 --- a/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java @@ -575,9 +575,9 @@ protected void parseCreateField(ParseContext context) throws IOException { context, fieldType().name() ); - /** - * JsonToStringParser is the main parser class to transform JSON into stringFields in a XContentParser - * It reads the JSON object and parsed to a list of string + /* + JsonToStringParser is the main parser class to transform JSON into stringFields in an XContentParser + It reads the JSON object and parses it to a list of strings */ XContentParser parser = JsonToStringParser.parseObject(); diff --git a/server/src/main/java/org/opensearch/index/mapper/GeoPointFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/GeoPointFieldMapper.java index e3dab3f892949..fcca7e9804bf3 100644 --- a/server/src/main/java/org/opensearch/index/mapper/GeoPointFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/GeoPointFieldMapper.java @@ -63,7 +63,7 @@ /** * Field Mapper for geo_point types. - * + *

                          * Uses lucene 6 LatLonPoint encoding * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/index/mapper/MappedFieldType.java b/server/src/main/java/org/opensearch/index/mapper/MappedFieldType.java index 58aa0bb2576e2..997835f712038 100644 --- a/server/src/main/java/org/opensearch/index/mapper/MappedFieldType.java +++ b/server/src/main/java/org/opensearch/index/mapper/MappedFieldType.java @@ -125,7 +125,7 @@ public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName, S /** * Create a helper class to fetch field values during the {@link FetchFieldsPhase}. - * + *

                          * New field types must implement this method in order to support the search 'fields' option. Except * for metadata fields, field types should not throw {@link UnsupportedOperationException} since this * could cause a search retrieving multiple fields (like "fields": ["*"]) to fail. @@ -487,7 +487,7 @@ public Map meta() { /** * Returns information on how any text in this field is indexed - * + *

                          * Fields that do not support any text-based queries should return * {@link TextSearchInfo#NONE}. Some fields (eg numeric) may support * only simple match queries, and can return diff --git a/server/src/main/java/org/opensearch/index/mapper/Mapper.java b/server/src/main/java/org/opensearch/index/mapper/Mapper.java index 59c647d38f0de..9c0e3ef72549a 100644 --- a/server/src/main/java/org/opensearch/index/mapper/Mapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/Mapper.java @@ -190,7 +190,7 @@ public Supplier queryShardContextSupplier() { /** * Gets an optional default date format for date fields that do not have an explicit format set - * + *

                          * If {@code null}, then date fields will default to {@link DateFieldMapper#DEFAULT_DATE_TIME_FORMATTER}. */ public DateFormatter getDateFormatter() { diff --git a/server/src/main/java/org/opensearch/index/mapper/MappingLookup.java b/server/src/main/java/org/opensearch/index/mapper/MappingLookup.java index 024f4b71584bf..1c608fc52c1f5 100644 --- a/server/src/main/java/org/opensearch/index/mapper/MappingLookup.java +++ b/server/src/main/java/org/opensearch/index/mapper/MappingLookup.java @@ -157,7 +157,7 @@ public MappingLookup( /** * Returns the leaf mapper associated with this field name. Note that the returned mapper * could be either a concrete {@link FieldMapper}, or a {@link FieldAliasMapper}. - * + *

                          * To access a field's type information, {@link MapperService#fieldType} should be used instead. */ public Mapper getMapper(String field) { diff --git a/server/src/main/java/org/opensearch/index/mapper/MetadataFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/MetadataFieldMapper.java index 4f5aefdfeed55..5a45ab72994ee 100644 --- a/server/src/main/java/org/opensearch/index/mapper/MetadataFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/MetadataFieldMapper.java @@ -70,7 +70,7 @@ public interface TypeParser extends Mapper.TypeParser { /** * Declares an updateable boolean parameter for a metadata field - * + *

                          * We need to distinguish between explicit configuration and default value for metadata * fields, because mapping updates will carry over the previous metadata values if a * metadata field is not explicitly declared in the update. A standard boolean diff --git a/server/src/main/java/org/opensearch/index/mapper/ParametrizedFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/ParametrizedFieldMapper.java index fd57975831e88..93b929a82f095 100644 --- a/server/src/main/java/org/opensearch/index/mapper/ParametrizedFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/ParametrizedFieldMapper.java @@ -63,11 +63,11 @@ /** * Defines how a particular field should be indexed and searched - * + *

                          * Configuration {@link Parameter}s for the mapper are defined on a {@link Builder} subclass, * and returned by its {@link Builder#getParameters()} method. Merging, serialization * and parsing of the mapper are all mediated through this set of parameters. - * + *

                          * Subclasses should implement a {@link Builder} that is returned from the * {@link #getMergeBuilder()} method, initialised with the existing builder. * @@ -86,7 +86,7 @@ protected ParametrizedFieldMapper(String simpleName, MappedFieldType mappedField /** * Returns a {@link Builder} to be used for merging and serialization - * + *

                          * Implement as follows: * {@code return new MyBuilder(simpleName()).init(this); } */ @@ -256,7 +256,7 @@ public Parameter acceptsNull() { /** * Adds a deprecated parameter name. - * + *

                          * If this parameter name is encountered during parsing, a deprecation warning will * be emitted. The parameter will be serialized with its main name. */ diff --git a/server/src/main/java/org/opensearch/index/mapper/SeqNoFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/SeqNoFieldMapper.java index 16f76f087e403..038066773a360 100644 --- a/server/src/main/java/org/opensearch/index/mapper/SeqNoFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/SeqNoFieldMapper.java @@ -55,11 +55,11 @@ /** * Mapper for the {@code _seq_no} field. - * + *

                          * We expect to use the seq# for sorting, during collision checking and for * doing range searches. Therefore the {@code _seq_no} field is stored both * as a numeric doc value and as numeric indexed field. - * + *

                          * This mapper also manages the primary term field, which has no OpenSearch named * equivalent. The primary term is only used during collision after receiving * identical seq# values for two document copies. The primary term is stored as diff --git a/server/src/main/java/org/opensearch/index/mapper/SourceValueFetcher.java b/server/src/main/java/org/opensearch/index/mapper/SourceValueFetcher.java index 69f53ba126790..a32d1c9f489ca 100644 --- a/server/src/main/java/org/opensearch/index/mapper/SourceValueFetcher.java +++ b/server/src/main/java/org/opensearch/index/mapper/SourceValueFetcher.java @@ -46,7 +46,7 @@ * An implementation of {@link ValueFetcher} that knows how to extract values * from the document source. Most standard field mappers will use this class * to implement value fetching. - * + *

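The dual representation that the SeqNoFieldMapper javadoc above describes (numeric doc value plus numeric indexed field, with a doc-values primary term alongside) could be spelled out with Lucene field types roughly like this; the wiring is a sketch, not the mapper's actual code:

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.LongPoint;
    import org.apache.lucene.document.NumericDocValuesField;

    final class SeqNoFieldsSketch {
        static void addSeqNo(Document doc, long seqNo, long primaryTerm) {
            doc.add(new LongPoint("_seq_no", seqNo));             // indexed: range searches
            doc.add(new NumericDocValuesField("_seq_no", seqNo)); // doc value: sorting, collision checks
            doc.add(new NumericDocValuesField("_primary_term", primaryTerm));
        }
    }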
                          * Field types that handle arrays directly should instead use {@link ArraySourceValueFetcher}. * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/index/mapper/ValueFetcher.java b/server/src/main/java/org/opensearch/index/mapper/ValueFetcher.java index d1cea3fe7f1b0..96237b16ea5a4 100644 --- a/server/src/main/java/org/opensearch/index/mapper/ValueFetcher.java +++ b/server/src/main/java/org/opensearch/index/mapper/ValueFetcher.java @@ -48,11 +48,11 @@ public interface ValueFetcher { /** * Given access to a document's _source, return this field's values. - * + *

                          * In addition to pulling out the values, they will be parsed into a standard form. * For example numeric field mappers make sure to parse the source value into a number * of the right type. - * + *

                          * Note that for array values, the order in which values are returned is undefined and * should not be relied on. * diff --git a/server/src/main/java/org/opensearch/index/query/AbstractQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/AbstractQueryBuilder.java index 6b8dd08ed0d91..66c6ee115c3f0 100644 --- a/server/src/main/java/org/opensearch/index/query/AbstractQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/AbstractQueryBuilder.java @@ -298,7 +298,7 @@ protected QueryBuilder doRewrite(QueryRewriteContext queryShardContext) throws I /** * For internal usage only! - * + *

                          * Extracts the inner hits from the query tree. * While it extracts inner hits, child inner hits are inlined into the inner hit builder they belong to. */ diff --git a/server/src/main/java/org/opensearch/index/query/GeoBoundingBoxQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/GeoBoundingBoxQueryBuilder.java index a4b75beab26ea..1fade8601e2a6 100644 --- a/server/src/main/java/org/opensearch/index/query/GeoBoundingBoxQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/GeoBoundingBoxQueryBuilder.java @@ -60,7 +60,7 @@ /** * Creates a Lucene query that will filter for all documents that lie within the specified * bounding box. - * + *

                          * This query can only operate on fields of type geo_point that have latitude and longitude * enabled. * diff --git a/server/src/main/java/org/opensearch/index/query/GeoShapeQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/GeoShapeQueryBuilder.java index e029884f32531..33b896a1d5163 100644 --- a/server/src/main/java/org/opensearch/index/query/GeoShapeQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/GeoShapeQueryBuilder.java @@ -55,7 +55,7 @@ /** * Derived {@link AbstractGeometryQueryBuilder} that builds a lat, lon GeoShape Query. It * can be applied to any {@link MappedFieldType} that implements {@link GeoShapeQueryable}. - * + *

                          * GeoJson and WKT shape definitions are supported * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/index/query/GeoValidationMethod.java b/server/src/main/java/org/opensearch/index/query/GeoValidationMethod.java index 559c084325abb..bb3cd34ae629d 100644 --- a/server/src/main/java/org/opensearch/index/query/GeoValidationMethod.java +++ b/server/src/main/java/org/opensearch/index/query/GeoValidationMethod.java @@ -42,7 +42,7 @@ /** * This enum is used to determine how to deal with invalid geo coordinates in geo related * queries: - * + *

                          * On STRICT validation invalid coordinates cause an exception to be thrown. * On IGNORE_MALFORMED invalid coordinates are being accepted. * On COERCE invalid coordinates are being corrected to the most likely valid coordinate. diff --git a/server/src/main/java/org/opensearch/index/query/IntervalsSourceProvider.java b/server/src/main/java/org/opensearch/index/query/IntervalsSourceProvider.java index b5cc3238295e0..5a57dfed14f69 100644 --- a/server/src/main/java/org/opensearch/index/query/IntervalsSourceProvider.java +++ b/server/src/main/java/org/opensearch/index/query/IntervalsSourceProvider.java @@ -691,7 +691,7 @@ public static class Regexp extends IntervalsSourceProvider { /** * Constructor - * + *

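The three GeoValidationMethod behaviours above, applied to a latitude value; the clamping used for COERCE here is illustrative ("most likely valid coordinate" may involve more than simple clamping in the real code):

    final class GeoValidationSketch {
        enum Method { STRICT, IGNORE_MALFORMED, COERCE }

        static double validateLat(double lat, Method method) {
            if (lat >= -90 && lat <= 90) {
                return lat;
            }
            switch (method) {
                case STRICT:
                    throw new IllegalArgumentException("invalid latitude: " + lat);
                case IGNORE_MALFORMED:
                    return lat; // accepted as-is
                case COERCE:
                default:
                    return Math.max(-90, Math.min(90, lat)); // corrected
            }
        }
    }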
                          * {@code flags} is Lucene's syntax flags * and {@code caseInsensitive} enables Lucene's only matching flag. */ diff --git a/server/src/main/java/org/opensearch/index/query/MatchBoolPrefixQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/MatchBoolPrefixQueryBuilder.java index 9a8d2ab104799..7ceb17203e837 100644 --- a/server/src/main/java/org/opensearch/index/query/MatchBoolPrefixQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/MatchBoolPrefixQueryBuilder.java @@ -176,7 +176,7 @@ public String minimumShouldMatch() { } @Deprecated - /** Sets the fuzziness used when evaluated to a fuzzy query type. Defaults to "AUTO". */ + /* Sets the fuzziness used when evaluated to a fuzzy query type. Defaults to "AUTO". */ public MatchBoolPrefixQueryBuilder fuzziness(Object fuzziness) { this.fuzziness = Fuzziness.build(fuzziness); return this; diff --git a/server/src/main/java/org/opensearch/index/query/MatchQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/MatchQueryBuilder.java index 844d47070923b..5e9e6a3660e76 100644 --- a/server/src/main/java/org/opensearch/index/query/MatchQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/MatchQueryBuilder.java @@ -209,7 +209,7 @@ public String analyzer() { } @Deprecated - /** Sets the fuzziness used when evaluated to a fuzzy query type. Defaults to "AUTO". */ + /* Sets the fuzziness used when evaluated to a fuzzy query type. Defaults to "AUTO". */ public MatchQueryBuilder fuzziness(Object fuzziness) { this.fuzziness = Fuzziness.build(fuzziness); return this; diff --git a/server/src/main/java/org/opensearch/index/query/MoreLikeThisQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/MoreLikeThisQueryBuilder.java index 84b30209f31bd..e6472afef2215 100644 --- a/server/src/main/java/org/opensearch/index/query/MoreLikeThisQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/MoreLikeThisQueryBuilder.java @@ -87,7 +87,7 @@ /** * A more like this query that finds documents that are "like" the provided set of document(s). - * + *

                          * The documents are provided as a set of strings and/or a list of {@link Item}. * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/index/query/MultiMatchQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/MultiMatchQueryBuilder.java index 1aa013cb24d5e..6227e5d2fa806 100644 --- a/server/src/main/java/org/opensearch/index/query/MultiMatchQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/MultiMatchQueryBuilder.java @@ -398,8 +398,8 @@ public int slop() { } @Deprecated - /** - * Sets the fuzziness used when evaluated to a fuzzy query type. Defaults to "AUTO". + /* + Sets the fuzziness used when evaluated to a fuzzy query type. Defaults to "AUTO". */ public MultiMatchQueryBuilder fuzziness(Object fuzziness) { if (fuzziness != null) { diff --git a/server/src/main/java/org/opensearch/index/query/QueryShardContext.java b/server/src/main/java/org/opensearch/index/query/QueryShardContext.java index 701484fbc8dc3..7b248c2a6f3c3 100644 --- a/server/src/main/java/org/opensearch/index/query/QueryShardContext.java +++ b/server/src/main/java/org/opensearch/index/query/QueryShardContext.java @@ -510,7 +510,7 @@ public final void freezeContext() { /** * This method fails if {@link #freezeContext()} is called before on this * context. This is used to seal. - * + *

                          * This method and all methods that call it should be final to ensure that * setting the request as not cacheable and the freezing behaviour of this * class cannot be bypassed. This is important so we can trust when this diff --git a/server/src/main/java/org/opensearch/index/query/QueryStringQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/QueryStringQueryBuilder.java index 469dc51da323a..3d8fbd5fc436d 100644 --- a/server/src/main/java/org/opensearch/index/query/QueryStringQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/QueryStringQueryBuilder.java @@ -119,7 +119,7 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder * Can be changed back to HashMap once https://issues.apache.org/jira/browse/LUCENE-6305 is fixed. */ private final Map fieldsAndWeights = new TreeMap<>(); diff --git a/server/src/main/java/org/opensearch/index/query/RangeQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/RangeQueryBuilder.java index 0d54373112904..fdbef2c732361 100644 --- a/server/src/main/java/org/opensearch/index/query/RangeQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/RangeQueryBuilder.java @@ -504,9 +504,9 @@ protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws @Override protected Query doToQuery(QueryShardContext context) throws IOException { if (from == null && to == null) { - /** - * Open bounds on both side, we can rewrite to an exists query - * if the {@link FieldNamesFieldMapper} is enabled. + /* + Open bounds on both side, we can rewrite to an exists query + if the {@link FieldNamesFieldMapper} is enabled. */ final FieldNamesFieldMapper.FieldNamesFieldType fieldNamesFieldType = (FieldNamesFieldMapper.FieldNamesFieldType) context .getMapperService() diff --git a/server/src/main/java/org/opensearch/index/query/SearchIndexNameMatcher.java b/server/src/main/java/org/opensearch/index/query/SearchIndexNameMatcher.java index 00758309fc0f0..598406b4e45f2 100644 --- a/server/src/main/java/org/opensearch/index/query/SearchIndexNameMatcher.java +++ b/server/src/main/java/org/opensearch/index/query/SearchIndexNameMatcher.java @@ -73,7 +73,7 @@ public SearchIndexNameMatcher( /** * Given an index pattern, checks whether it matches against the current shard. - * + *

                          * If this shard represents a remote shard target, then in order to match, the pattern must contain * the separator ':' and must match on both the cluster alias and index name. */ diff --git a/server/src/main/java/org/opensearch/index/query/SimpleQueryStringBuilder.java b/server/src/main/java/org/opensearch/index/query/SimpleQueryStringBuilder.java index 3cd0554af49a4..57ae7dd0ea5e9 100644 --- a/server/src/main/java/org/opensearch/index/query/SimpleQueryStringBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/SimpleQueryStringBuilder.java @@ -66,7 +66,7 @@ *

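The SearchIndexNameMatcher rule above (remote targets only match "alias:pattern" forms) reduces to a split on ':'. A sketch; simpleMatch here stands in for the real wildcard matching:

    final class RemotePatternSketch {
        static boolean matches(String pattern, String clusterAlias, String indexName) {
            int sep = pattern.indexOf(':');
            if (clusterAlias != null) {        // remote shard target
                if (sep < 0) return false;     // pattern must contain ':'
                return simpleMatch(pattern.substring(0, sep), clusterAlias)
                    && simpleMatch(pattern.substring(sep + 1), indexName);
            }
            return sep < 0 && simpleMatch(pattern, indexName);
        }

        private static boolean simpleMatch(String pattern, String value) {
            return pattern.equals("*") || pattern.equals(value); // illustrative only
        }
    }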
                        • '{@code -}' negates a single token: {@code -token0} *
                        • '{@code "}' creates phrases of terms: {@code "term1 term2 ..."} *
                        • '{@code *}' at the end of terms specifies prefix query: {@code term*} - *
                        • '{@code (}' and '{@code)}' specifies precedence: {@code token1 + (token2 | token3)} + *
                        • '{@code (}' and '{@code )}' specifies precedence: {@code token1 + (token2 | token3)} *
                        • '{@code ~}N' at the end of terms specifies fuzzy query: {@code term~1} *
                        • '{@code ~}N' at the end of phrases specifies near/slop query: {@code "term1 term2"~5} *
diff --git a/server/src/main/java/org/opensearch/index/reindex/BulkByScrollTask.java b/server/src/main/java/org/opensearch/index/reindex/BulkByScrollTask.java
index f33800659245f..d7c0da4773fff 100644
--- a/server/src/main/java/org/opensearch/index/reindex/BulkByScrollTask.java
+++ b/server/src/main/java/org/opensearch/index/reindex/BulkByScrollTask.java
@@ -73,13 +73,13 @@
 /**
  * Task storing information about a currently running BulkByScroll request.
- *
+ * <p>
  * When the request is not sliced, this task is the only task created, and starts an action to perform search requests.
- *
+ * <p>
  * When the request is sliced, this task can either represent a coordinating task (using
  * {@link BulkByScrollTask#setWorkerCount(int)}) or a worker task that performs search queries (using
  * {@link BulkByScrollTask#setWorker(float, Integer)}).
- *
+ * <p>
  * We don't always know if this task will be a leader or worker task when it's created, because if slices is set to "auto" it may
  * be either depending on the number of shards in the source indices. We figure that out when the request is handled and set it on this
  * class with {@link #setWorkerCount(int)} or {@link #setWorker(float, Integer)}.
diff --git a/server/src/main/java/org/opensearch/index/reindex/DeleteByQueryRequest.java b/server/src/main/java/org/opensearch/index/reindex/DeleteByQueryRequest.java
index aff9ec1f20e46..4963080f5916c 100644
--- a/server/src/main/java/org/opensearch/index/reindex/DeleteByQueryRequest.java
+++ b/server/src/main/java/org/opensearch/index/reindex/DeleteByQueryRequest.java
@@ -49,7 +49,7 @@
 /**
  * Creates a new {@link DeleteByQueryRequest} that uses scrolling and bulk requests to delete all documents matching
  * the query. This can have performance as well as visibility implications.
- *
+ * <p>
  * Delete-by-query now has the following semantics:
 *
 * <ul>
 * <li>it's {@code non-atomic}, a delete-by-query may fail at any time while some documents matching the query have already been
diff --git a/server/src/main/java/org/opensearch/index/search/MatchQuery.java b/server/src/main/java/org/opensearch/index/search/MatchQuery.java
index ade4fb1e69586..9e2b79971369d 100644
--- a/server/src/main/java/org/opensearch/index/search/MatchQuery.java
+++ b/server/src/main/java/org/opensearch/index/search/MatchQuery.java
@@ -753,9 +753,9 @@ private Query analyzeGraphBoolean(String field, TokenStream source, BooleanClaus
                 lastState = end;
                 final Query queryPos;
                 boolean usePrefix = isPrefix && end == -1;
-                /**
-                 * check if the GraphTokenStreamFiniteStrings graph is empty
-                 * return empty BooleanQuery result
+                /*
+                  check if the GraphTokenStreamFiniteStrings graph is empty
+                  return empty BooleanQuery result
                  */
                 Iterator<TokenStream> graphIt = graph.getFiniteStrings();
                 if (!graphIt.hasNext()) {
diff --git a/server/src/main/java/org/opensearch/index/seqno/LocalCheckpointTracker.java b/server/src/main/java/org/opensearch/index/seqno/LocalCheckpointTracker.java
index 0a4c197898d3d..155866e20d007 100644
--- a/server/src/main/java/org/opensearch/index/seqno/LocalCheckpointTracker.java
+++ b/server/src/main/java/org/opensearch/index/seqno/LocalCheckpointTracker.java
@@ -147,7 +147,7 @@ public synchronized void markSeqNoAsPersisted(final long seqNo) {
     /**
      * Updates the processed sequence checkpoint to the given value.
-     *
+     * <p>
      * This method is only used for segment replication since indexing doesn't
      * take place on the replica allowing us to avoid the check that all sequence numbers
      * are consecutively processed.
@@ -208,7 +208,7 @@ public long getMaxSeqNo() {
     /**
      * constructs a {@link SeqNoStats} object, using local state and the supplied global checkpoint
-     *
+     * <p>
      * This is needed to make sure the persisted local checkpoint and max seq no are consistent
      */
     public synchronized SeqNoStats getStats(final long globalCheckpoint) {
diff --git a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java
index 4b6d72b86ff62..94f376d923689 100644
--- a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java
+++ b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java
@@ -88,7 +88,7 @@
 /**
  * This class is responsible for tracking the replication group with its progress and safety markers (local and global checkpoints).
- *
+ * <p>
  * The global checkpoint is the highest sequence number for which all lower (or equal) sequence number have been processed
  * on all shards that are currently active. Since shards count as "active" when the cluster-manager starts
  * them, and before this primary shard has been notified of this fact, we also include shards that have completed recovery. These shards
@@ -112,10 +112,10 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L
  * checkpoint based on the local checkpoints of all in-sync shard copies.
  * - replica: this shard receives global checkpoint information from the primary (see
  *   {@link #updateGlobalCheckpointOnReplica(long, String)}).
- *
+ * <p>
  * When a shard is initialized (be it a primary or replica), it initially operates in replica mode. The global checkpoint tracker is
  * then switched to primary mode in the following three scenarios:
- *
+ * <p>
  * - An initializing primary shard that is not a relocation target is moved to primary mode (using {@link #activatePrimaryMode}) once
  *   the shard becomes active.
  * - An active replica shard is moved to primary mode (using {@link #activatePrimaryMode}) once it is promoted to primary.
@@ -140,7 +140,7 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L
  * in-sync shard copies cannot grow, otherwise the relocation target might miss this information and increase the global checkpoint
  * to eagerly. As consequence, some of the methods in this class are not allowed to be called while a handoff is in progress,
  * in particular {@link #markAllocationIdAsInSync}.
- *
+ * <p>
  * A notable exception to this is the method {@link #updateFromClusterManager}, which is still allowed to be called during a relocation handoff.
  * The reason for this is that the handoff might fail and can be aborted (using {@link #abortRelocationHandoff}), in which case
  * it is important that the global checkpoint tracker does not miss any state updates that might happened during the handoff attempt.
@@ -1163,7 +1163,7 @@ public synchronized void updateGlobalCheckpointForShard(final String allocationI
     /**
      * Update the local knowledge of the visible checkpoint for the specified allocation ID.
-     *
+     * <p>
     * This method will also stop timers for each shard and compute replication lag metrics.
     *
     * @param allocationId the allocation ID to update the global checkpoint for
diff --git a/server/src/main/java/org/opensearch/index/shard/IndexEventListener.java b/server/src/main/java/org/opensearch/index/shard/IndexEventListener.java
index d1cb396f55d0f..3017c3ce6dcff 100644
--- a/server/src/main/java/org/opensearch/index/shard/IndexEventListener.java
+++ b/server/src/main/java/org/opensearch/index/shard/IndexEventListener.java
@@ -155,7 +155,7 @@ default void beforeIndexShardDeleted(ShardId shardId, Settings indexSettings) {}
     /**
      * Called after the index shard has been deleted from disk.
-     *
+     * <p>
      * Note: this method is only called if the deletion of the shard did finish without an exception
      *
      * @param shardId The shard id
diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java
index 2643af501bbc9..6ac75c7bfec17 100644
--- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java
+++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java
@@ -1618,7 +1618,7 @@ public ReplicationCheckpoint getLatestReplicationCheckpoint() {
     /**
      * Compute and return the latest ReplicationCheckpoint for a shard and a GatedCloseable containing the corresponding SegmentInfos.
      * The segments referenced by the SegmentInfos will remain on disk until the GatedCloseable is closed.
-     *
+     * <p>
      * Primary shards compute the seqNo used in the replication checkpoint from the fetched SegmentInfos.
      * Replica shards compute the seqNo from its latest processed checkpoint, which only increases when refreshing on new segments.
      *
@@ -3774,9 +3774,9 @@ private EngineConfig newEngineConfig(LongSupplier globalCheckpointSupplier) thro
         );
     }

-    /**
-     * With segment replication enabled for primary relocation, recover replica shard initially as read only and
-     * change to a writeable engine during relocation handoff after a round of segment replication.
+    /*
+      With segment replication enabled for primary relocation, recover replica shard initially as read only and
+      change to a writeable engine during relocation handoff after a round of segment replication.
      */
     boolean isReadOnlyReplica = indexSettings.isSegRepEnabled()
         && (shardRouting.primary() == false
@@ -4414,7 +4414,7 @@ public final boolean isSearchIdle() {
     /**
      *
      * Returns true if this shard supports search idle.
-     *
+     * <p>
      * Indices using Segment Replication will ignore search idle unless there are no replicas.
      * Primary shards push out new segments only
      * after a refresh, so we don't want to wait for a search to trigger that cycle. Replicas will only refresh after receiving
diff --git a/server/src/main/java/org/opensearch/index/shard/RefreshListeners.java b/server/src/main/java/org/opensearch/index/shard/RefreshListeners.java
index 7dbbcbb2d7d20..803db773efe6c 100644
--- a/server/src/main/java/org/opensearch/index/shard/RefreshListeners.java
+++ b/server/src/main/java/org/opensearch/index/shard/RefreshListeners.java
@@ -54,7 +54,7 @@
 /**
  * Allows for the registration of listeners that are called when a change becomes visible for search. This functionality is exposed from
  * {@link IndexShard} but kept here so it can be tested without standing up the entire thing.
- *
+ * <p>
  * When {@link Closeable#close()}d it will no longer accept listeners and flush any existing listeners.
  *
  * @opensearch.internal
@@ -86,7 +86,7 @@ public final class RefreshListeners implements ReferenceManager.RefreshListener,
     * List of refresh listeners. Defaults to null and built on demand because most refresh cycles won't need it. Entries are never removed
     * from it, rather, it is nulled and rebuilt when needed again. The (hopefully) rare entries that didn't make the current refresh cycle
     * are just added back to the new list. Both the reference and the contents are always modified while synchronized on {@code this}.
-     *
+     * <p>
     * We never set this to non-null while closed it {@code true}.
     */
    private volatile List<Tuple<Long, Consumer<Boolean>>> refreshListeners = null;
diff --git a/server/src/main/java/org/opensearch/index/similarity/SimilarityProvider.java b/server/src/main/java/org/opensearch/index/similarity/SimilarityProvider.java
index ad64f3a55228f..085e93c794fb7 100644
--- a/server/src/main/java/org/opensearch/index/similarity/SimilarityProvider.java
+++ b/server/src/main/java/org/opensearch/index/similarity/SimilarityProvider.java
@@ -70,23 +70,23 @@ public boolean equals(Object o) {
         if (this == o) return true;
         if (o == null || getClass() != o.getClass()) return false;
         SimilarityProvider that = (SimilarityProvider) o;
-        /**
-         * We check name only because the similarity is
-         * re-created for each new instance and they don't implement equals.
-         * This is not entirely correct though but we only use equality checks
-         * for similarities inside the same index and names are unique in this case.
-         **/
+        /*
+          We check name only because the similarity is
+          re-created for each new instance and they don't implement equals.
+          This is not entirely correct though but we only use equality checks
+          for similarities inside the same index and names are unique in this case.
+         */
         return Objects.equals(name, that.name);
     }

     @Override
     public int hashCode() {
-        /**
-         * We use name only because the similarity is
-         * re-created for each new instance and they don't implement equals.
-         * This is not entirely correct though but we only use equality checks
-         * for similarities a single index and names are unique in this case.
-         **/
+        /*
+          We use name only because the similarity is
+          re-created for each new instance and they don't implement equals.
+          This is not entirely correct though but we only use equality checks
+          for similarities a single index and names are unique in this case.
+         */
         return Objects.hash(name);
     }
 }
diff --git a/server/src/main/java/org/opensearch/index/snapshots/blobstore/RemoteStoreShardShallowCopySnapshot.java b/server/src/main/java/org/opensearch/index/snapshots/blobstore/RemoteStoreShardShallowCopySnapshot.java
index eefc1469a06a0..aa5d90cc65803 100644
--- a/server/src/main/java/org/opensearch/index/snapshots/blobstore/RemoteStoreShardShallowCopySnapshot.java
+++ b/server/src/main/java/org/opensearch/index/snapshots/blobstore/RemoteStoreShardShallowCopySnapshot.java
@@ -322,10 +322,10 @@ public String snapshot() {
         return snapshot;
     }

-    /**
-     * Returns list of files in the shard
-     *
-     * @return list of files
+    /*
+      Returns list of files in the shard
+
+      @return list of files
      */

     /**
diff --git a/server/src/main/java/org/opensearch/index/store/Store.java b/server/src/main/java/org/opensearch/index/store/Store.java
index 9f505121d8dfa..b822742de6e97 100644
--- a/server/src/main/java/org/opensearch/index/store/Store.java
+++ b/server/src/main/java/org/opensearch/index/store/Store.java
@@ -299,14 +299,15 @@ final void ensureOpen() {
     /**
      * Returns a new MetadataSnapshot for the given commit. If the given commit is null
      * the latest commit point is used.
-     *
+     * <p>
      * Note that this method requires the caller verify it has the right to access the store and
      * no concurrent file changes are happening. If in doubt, you probably want to use one of the following:
-     *
+     * <p>
      * {@link #readMetadataSnapshot(Path, ShardId, NodeEnvironment.ShardLocker, Logger)} to read a meta data while locking
      * {@link IndexShard#snapshotStoreMetadata()} to safely read from an existing shard
      * {@link IndexShard#acquireLastIndexCommit(boolean)} to get an {@link IndexCommit} which is safe to use but has to be freed
-     * @param commit the index commit to read the snapshot from or null if the latest snapshot should be read from the
+     *
+     * @param commit the index commit to read the snapshot from or {@code null} if the latest snapshot should be read from the
      *               directory
      * @throws CorruptIndexException if the lucene index is corrupted. This can be caused by a checksum mismatch or an
      *                               unexpected exception when opening the index reading the segments file.
@@ -330,10 +331,10 @@ public MetadataSnapshot getMetadata() throws IOException {
     /**
      * Returns a new MetadataSnapshot for the given commit. If the given commit is null
      * the latest commit point is used.
-     *
+     * <p>
      * Note that this method requires the caller verify it has the right to access the store and
      * no concurrent file changes are happening. If in doubt, you probably want to use one of the following:
-     *
+     * <p>
      * {@link #readMetadataSnapshot(Path, ShardId, NodeEnvironment.ShardLocker, Logger)} to read a meta data while locking
      * {@link IndexShard#snapshotStoreMetadata()} to safely read from an existing shard
      * {@link IndexShard#acquireLastIndexCommit(boolean)} to get an {@link IndexCommit} which is safe to use but has to be freed
@@ -806,7 +807,7 @@ public void cleanupAndVerify(String reason, MetadataSnapshot sourceMetadata) thr
     /**
      * Segment replication method
-     *
+     * <p>
      * This method takes the segment info bytes to build SegmentInfos. It inc'refs files pointed by passed in SegmentInfos
      * bytes to ensure they are not deleted.
      *
@@ -881,7 +882,7 @@ public void beforeClose() {
      * have the ability to create a writer directly from a SegmentInfos object. To promote the replica as a primary and avoid reindexing, we must first commit
      * on the replica so that it can be opened with a writeable engine. Further, InternalEngine currently invokes `trimUnsafeCommits` which reverts the engine to a previous safeCommit where the max seqNo is less than or equal
      * to the current global checkpoint. It is likely that the replica has a maxSeqNo that is higher than the global cp and a new commit will be wiped.
-     *
+     * <p>
     * To get around these limitations, this method first creates an IndexCommit directly from SegmentInfos, it then
     * uses an appending IW to create an IndexCommit from the commit created on SegmentInfos.
     * This ensures that 1. All files in the new commit are fsynced and 2. Deletes older commit points so the only commit to start from is our new commit.
diff --git a/server/src/main/java/org/opensearch/index/store/remote/file/OnDemandBlockIndexInput.java b/server/src/main/java/org/opensearch/index/store/remote/file/OnDemandBlockIndexInput.java
index 7319a5324777a..6fd198747570f 100644
--- a/server/src/main/java/org/opensearch/index/store/remote/file/OnDemandBlockIndexInput.java
+++ b/server/src/main/java/org/opensearch/index/store/remote/file/OnDemandBlockIndexInput.java
@@ -27,7 +27,7 @@
 * <p>
 * This class delegate the responsibility of actually fetching the block when demanded to its subclasses using
 * {@link OnDemandBlockIndexInput#fetchBlock(int)}.
- *
+ * <p>
 * Like {@link IndexInput}, this class may only be used from one thread as it is not thread safe.
 * However, a cleaning action may run from another thread triggered by the {@link Cleaner}, but
 * this is okay because at that point the {@link OnDemandBlockIndexInput} instance is phantom
@@ -428,10 +428,10 @@ Builder blockSizeShift(int blockSizeShift) {
     * instance to hold the current underlying IndexInput, while allowing it to
     * be changed out with different instances as {@link OnDemandBlockIndexInput}
     * reads through the data.
-     *
+     * <p>
     * This class implements {@link Runnable} so that it can be passed directly
     * to the cleaner to run its close action.
-     *
+     * <p>
     * [1]: https://github.com/apache/lucene/blob/8340b01c3cc229f33584ce2178b07b8984daa6a9/lucene/core/src/java/org/apache/lucene/store/IndexInput.java#L32-L33
     */
    private static class BlockHolder implements Closeable, Runnable {
diff --git a/server/src/main/java/org/opensearch/index/translog/DefaultTranslogDeletionPolicy.java b/server/src/main/java/org/opensearch/index/translog/DefaultTranslogDeletionPolicy.java
index efc762ef00d52..05049e5d07373 100644
--- a/server/src/main/java/org/opensearch/index/translog/DefaultTranslogDeletionPolicy.java
+++ b/server/src/main/java/org/opensearch/index/translog/DefaultTranslogDeletionPolicy.java
@@ -14,7 +14,7 @@
 /**
 * Default implementation for the {@link TranslogDeletionPolicy}. Plugins can override the default behaviour
 * via the {@link org.opensearch.plugins.EnginePlugin#getCustomTranslogDeletionPolicyFactory()}.
- *
+ * <p>
 * The default policy uses total number, size in bytes and maximum age for files.
 *
 * @opensearch.internal
diff --git a/server/src/main/java/org/opensearch/index/translog/Translog.java b/server/src/main/java/org/opensearch/index/translog/Translog.java
index baa3737d576de..cf7f18840a03e 100644
--- a/server/src/main/java/org/opensearch/index/translog/Translog.java
+++ b/server/src/main/java/org/opensearch/index/translog/Translog.java
@@ -840,7 +840,7 @@ public boolean ensureSynced(Stream locations) throws IOException {
     /**
      * Closes the translog if the current translog writer experienced a tragic exception.
-     *
+     * <p>
      * Note that in case this thread closes the translog it must not already be holding a read lock on the translog as it will acquire a
      * write lock in the course of closing the translog
      *
@@ -1976,7 +1976,7 @@ static String createEmptyTranslog(
     /**
      * Creates a new empty translog within the specified {@code location} that contains the given {@code initialGlobalCheckpoint},
      * {@code primaryTerm} and {@code translogUUID}.
-     *
+     * <p>
      * This method should be used directly under specific circumstances like for shards that will see no indexing. Specifying a non-unique
      * translog UUID could cause a lot of issues and that's why in all (but one) cases the method
      * {@link #createEmptyTranslog(Path, long, ShardId, long)} should be used instead.
diff --git a/server/src/main/java/org/opensearch/index/translog/TranslogWriter.java b/server/src/main/java/org/opensearch/index/translog/TranslogWriter.java
index a5f0607431a8b..3f33c155be15e 100644
--- a/server/src/main/java/org/opensearch/index/translog/TranslogWriter.java
+++ b/server/src/main/java/org/opensearch/index/translog/TranslogWriter.java
@@ -376,7 +376,7 @@ synchronized boolean assertNoSeqAbove(long belowTerm, long aboveSeqNo) {
     /**
      * write all buffered ops to disk and fsync file.
-     *
+     * <p>
      * Note: any exception during the sync process will be interpreted as a tragic exception and the writer will be closed before
      * raising the exception.
      * @return true if this call caused an actual sync operation
diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java
index e27a597007b62..50c551c2be29b 100644
--- a/server/src/main/java/org/opensearch/indices/IndicesService.java
+++ b/server/src/main/java/org/opensearch/indices/IndicesService.java
@@ -926,7 +926,7 @@ private EngineFactory getEngineFactory(final IndexSettings idxSettings) {
     /**
      * creates a new mapper service for the given index, in order to do administrative work like mapping updates.
      * This *should not* be used for document parsing. Doing so will result in an exception.
-     *
+     * <p>
     * Note: the returned {@link MapperService} should be closed when unneeded.
     */
    public synchronized MapperService createIndexMapperService(IndexMetadata indexMetadata) throws IOException {
@@ -1138,7 +1138,7 @@ public void deleteUnassignedIndex(String reason, IndexMetadata metadata, Cluster
    /**
     * Deletes the index store trying to acquire all shards locks for this index.
     * This method will delete the metadata for the index even if the actual shards can't be locked.
-     *
+     * <p>
     * Package private for testing
     */
    void deleteIndexStore(String reason, IndexMetadata metadata) throws IOException {
@@ -1219,7 +1219,7 @@ public void deleteShardStore(String reason, ShardLock lock, IndexSettings indexS
     * This method deletes the shard contents on disk for the given shard ID. This method will fail if the shard deleting
     * is prevented by {@link #canDeleteShardContent(ShardId, IndexSettings)}
     * of if the shards lock can not be acquired.
-     *
+     * <p>
     * On data nodes, if the deleted shard is the last shard folder in its index, the method will attempt to remove
     * the index folder as well.
     *
diff --git a/server/src/main/java/org/opensearch/indices/ShardLimitValidator.java b/server/src/main/java/org/opensearch/indices/ShardLimitValidator.java
index d9a64781c3f46..e345b613eebbd 100644
--- a/server/src/main/java/org/opensearch/indices/ShardLimitValidator.java
+++ b/server/src/main/java/org/opensearch/indices/ShardLimitValidator.java
@@ -54,7 +54,7 @@
 /**
 * This class contains the logic used to check the cluster-wide shard limit before shards are created and ensuring that the limit is
 * updated correctly on setting updates, etc.
- *
+ * <p>
 * NOTE: This is the limit applied at *shard creation time*. If you are looking for the limit applied at *allocation* time, which is
 * controlled by a different setting,
 * see {@link org.opensearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider}.
diff --git a/server/src/main/java/org/opensearch/indices/analysis/PreBuiltCacheFactory.java b/server/src/main/java/org/opensearch/indices/analysis/PreBuiltCacheFactory.java
index 601bd79a24746..13cc78b620b9a 100644
--- a/server/src/main/java/org/opensearch/indices/analysis/PreBuiltCacheFactory.java
+++ b/server/src/main/java/org/opensearch/indices/analysis/PreBuiltCacheFactory.java
@@ -48,11 +48,11 @@ public class PreBuiltCacheFactory {
    /**
     * The strategy of caching the analyzer
-     *
-     * ONE Exactly one version is stored. Useful for analyzers which do not store version information
-     * LUCENE Exactly one version for each lucene version is stored. Useful to prevent different analyzers with the same version
-     * OPENSEARCH Exactly one version per opensearch version is stored. Useful if you change an analyzer between opensearch
-     * releases, when the lucene version does not change
+     * <p>
+     * <ul>
+     * <li>ONE : Exactly one version is stored. Useful for analyzers which do not store version information</li>
+     * <li>LUCENE : Exactly one version for each lucene version is stored. Useful to prevent different analyzers with the same version</li>
+     * <li>OPENSEARCH : Exactly one version per opensearch version is stored. Useful if you change an analyzer between opensearch releases, when the lucene version does not change</li>
+     * </ul>
     */
    public enum CachingStrategy {
        ONE,
diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTargetHandler.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTargetHandler.java
index ac28dabf815a5..707e41c8c27e1 100644
--- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTargetHandler.java
+++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTargetHandler.java
@@ -55,7 +55,7 @@ public interface RecoveryTargetHandler extends FileChunkWriter {
    /**
     * Used with Segment replication only
-     *
+     * <p>
     * This function is used to force a sync target primary node with source (old primary). This is to avoid segment files
     * conflict with replicas when target is promoted as primary.
     */
diff --git a/server/src/main/java/org/opensearch/indices/recovery/RemoteRecoveryTargetHandler.java b/server/src/main/java/org/opensearch/indices/recovery/RemoteRecoveryTargetHandler.java
index 36beabc4a9026..66f5b13449f05 100644
--- a/server/src/main/java/org/opensearch/indices/recovery/RemoteRecoveryTargetHandler.java
+++ b/server/src/main/java/org/opensearch/indices/recovery/RemoteRecoveryTargetHandler.java
@@ -189,7 +189,7 @@ public void indexTranslogOperations(
    /**
     * Used with Segment replication only
-     *
+     * <p>
     * This function is used to force a sync target primary node with source (old primary). This is to avoid segment files
     * conflict with replicas when target is promoted as primary.
     */
diff --git a/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java b/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java
index 268894b1c0af3..33967c0203516 100644
--- a/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java
+++ b/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java
@@ -61,8 +61,8 @@ class OngoingSegmentReplications {
         this.allocationIdToHandlers = ConcurrentCollections.newConcurrentMap();
     }

-    /**
-     * Operations on the {@link #copyStateMap} member.
+    /*
+      Operations on the {@link #copyStateMap} member.
      */

     /**
@@ -85,12 +85,12 @@ synchronized CopyState getCachedCopyState(ReplicationCheckpoint checkpoint) thro
             // build the CopyState object and cache it before returning
             final CopyState copyState = new CopyState(checkpoint, indexShard);
-            /**
-             * Use the checkpoint from the request as the key in the map, rather than
-             * the checkpoint from the created CopyState. This maximizes cache hits
-             * if replication targets make a request with an older checkpoint.
-             * Replication targets are expected to fetch the checkpoint in the response
-             * CopyState to bring themselves up to date.
+            /*
+              Use the checkpoint from the request as the key in the map, rather than
+              the checkpoint from the created CopyState. This maximizes cache hits
+              if replication targets make a request with an older checkpoint.
+              Replication targets are expected to fetch the checkpoint in the response
+              CopyState to bring themselves up to date.
              */
             addToCopyStateMap(checkpoint, copyState);
             return copyState;
diff --git a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationRequestTracker.java b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationRequestTracker.java
index 39649727e31d7..3775be7b6da15 100644
--- a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationRequestTracker.java
+++ b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationRequestTracker.java
@@ -58,8 +58,7 @@ public class ReplicationRequestTracker {
     * This method will mark that a request with a unique sequence number has been received. If this is the
     * first time the unique request has been received, this method will return a listener to be completed.
     * The caller should then perform the requested action and complete the returned listener.
-     *
-     *
+     * <p>
     * If the unique request has already been received, this method will either complete the provided listener
     * or attach that listener to the listener returned in the first call. In this case, the method will
     * return null and the caller should not perform the requested action as a prior caller is already
diff --git a/server/src/main/java/org/opensearch/ingest/ConfigurationUtils.java b/server/src/main/java/org/opensearch/ingest/ConfigurationUtils.java
index f97388a8262ff..5185b740d90cb 100644
--- a/server/src/main/java/org/opensearch/ingest/ConfigurationUtils.java
+++ b/server/src/main/java/org/opensearch/ingest/ConfigurationUtils.java
@@ -74,7 +74,7 @@ private ConfigurationUtils() {}
    /**
     * Returns and removes the specified optional property from the specified configuration map.
-     *
+     * <p>
     * If the property value isn't of type string a {@link OpenSearchParseException} is thrown.
     */
    public static String readOptionalStringProperty(
@@ -89,7 +89,7 @@ public static String readOptionalStringProperty(
    /**
     * Returns and removes the specified property from the specified configuration map.
-     *
+     * <p>
     * If the property value isn't of type string an {@link OpenSearchParseException} is thrown.
     * If the property is missing an {@link OpenSearchParseException} is thrown
     */
@@ -104,7 +104,7 @@ public static String readStringProperty(
    /**
     * Returns and removes the specified property from the specified configuration map.
-     *
+     * <p>
     * If the property value isn't of type string a {@link OpenSearchParseException} is thrown.
     * If the property is missing and no default value has been specified a {@link OpenSearchParseException} is thrown
     */
@@ -141,7 +141,7 @@ private static String readString(String processorType, String processorTag, Stri
    /**
     * Returns and removes the specified property from the specified configuration map.
-     *
+     * <p>
     * If the property value isn't of type string or int a {@link OpenSearchParseException} is thrown.
     * If the property is missing and no default value has been specified a {@link OpenSearchParseException} is thrown
     */
@@ -180,7 +180,7 @@ private static String readStringOrInt(String processorType, String processorTag,
    /**
     * Returns and removes the specified property from the specified configuration map.
-     *
+     * <p>
     * If the property value isn't of type string or int a {@link OpenSearchParseException} is thrown.
     */
    public static String readOptionalStringOrIntProperty(
@@ -228,7 +228,7 @@ private static Boolean readBoolean(String processorType, String processorTag, St
    /**
     * Returns and removes the specified property from the specified configuration map.
-     *
+     * <p>
     * If the property value isn't of type int a {@link OpenSearchParseException} is thrown.
     * If the property is missing an {@link OpenSearchParseException} is thrown
     */
@@ -257,7 +257,7 @@ public static Integer readIntProperty(
    /**
     * Returns and removes the specified property from the specified configuration map.
-     *
+     * <p>
     * If the property value isn't of type int a {@link OpenSearchParseException} is thrown.
     * If the property is missing an {@link OpenSearchParseException} is thrown
     */
@@ -285,7 +285,7 @@ public static Double readDoubleProperty(
    /**
     * Returns and removes the specified property of type list from the specified configuration map.
-     *
+     * <p>
     * If the property value isn't of type list an {@link OpenSearchParseException} is thrown.
     */
    public static <T> List<T> readOptionalList(
@@ -303,7 +303,7 @@ public static <T> List<T> readOptionalList(
    /**
     * Returns and removes the specified property of type list from the specified configuration map.
-     *
+     * <p>
     * If the property value isn't of type list an {@link OpenSearchParseException} is thrown.
     * If the property is missing an {@link OpenSearchParseException} is thrown
     */
@@ -333,7 +333,7 @@ private static List readList(String processorType, String processorTag, S
    /**
     * Returns and removes the specified property of type map from the specified configuration map.
-     *
+     * <p>
     * If the property value isn't of type map an {@link OpenSearchParseException} is thrown.
     * If the property is missing an {@link OpenSearchParseException} is thrown
     */
@@ -353,7 +353,7 @@ public static Map readMap(
    /**
     * Returns and removes the specified property of type map from the specified configuration map.
-     *
+     * <p>
     * If the property value isn't of type map an {@link OpenSearchParseException} is thrown.
     */
    public static Map readOptionalMap(
diff --git a/server/src/main/java/org/opensearch/ingest/IngestService.java b/server/src/main/java/org/opensearch/ingest/IngestService.java
index 91003e963e302..baf357a4bc0d5 100644
--- a/server/src/main/java/org/opensearch/ingest/IngestService.java
+++ b/server/src/main/java/org/opensearch/ingest/IngestService.java
@@ -695,7 +695,7 @@ public IngestStats stats() {
    /**
     * Adds a listener that gets invoked with the current cluster state before processor factories
     * get invoked.
-     *
+     * <p>
     * This is useful for components that are used by ingest processors, so that they have the opportunity to update
     * before these components get used by the ingest processor factory.
     */
diff --git a/server/src/main/java/org/opensearch/ingest/Pipeline.java b/server/src/main/java/org/opensearch/ingest/Pipeline.java
index 37643e48ed2be..2541cfbf4af77 100644
--- a/server/src/main/java/org/opensearch/ingest/Pipeline.java
+++ b/server/src/main/java/org/opensearch/ingest/Pipeline.java
@@ -123,8 +123,8 @@ public static Pipeline create(
    /**
     * Modifies the data of a document to be indexed based on the processor this pipeline holds
-     *
-     * If null is returned then this document will be dropped and not indexed, otherwise
+     * <p>
+     * If {@code null} is returned then this document will be dropped and not indexed, otherwise
     * this document will be kept and indexed.
     */
    public void execute(IngestDocument ingestDocument, BiConsumer<IngestDocument, Exception> handler) {
diff --git a/server/src/main/java/org/opensearch/ingest/Processor.java b/server/src/main/java/org/opensearch/ingest/Processor.java
index e0f196dfcb115..6097045a87e21 100644
--- a/server/src/main/java/org/opensearch/ingest/Processor.java
+++ b/server/src/main/java/org/opensearch/ingest/Processor.java
@@ -48,7 +48,7 @@
 /**
 * A processor implementation may modify the data belonging to a document.
 * Whether changes are made and what exactly is modified is up to the implementation.
- *
+ * <p>
 * Processors may get called concurrently and thus need to be thread-safe.
 *
 * @opensearch.internal
@@ -57,7 +57,7 @@ public interface Processor {
    /**
     * Introspect and potentially modify the incoming data.
-     *
+     * <p>
     * Expert method: only override this method if a processor implementation needs to make an asynchronous call,
     * otherwise just overwrite {@link #execute(IngestDocument)}.
     */
diff --git a/server/src/main/java/org/opensearch/ingest/ValueSource.java b/server/src/main/java/org/opensearch/ingest/ValueSource.java
index 0ef7c3373596d..3463fb0f83b26 100644
--- a/server/src/main/java/org/opensearch/ingest/ValueSource.java
+++ b/server/src/main/java/org/opensearch/ingest/ValueSource.java
@@ -56,7 +56,7 @@ public interface ValueSource {
    /**
     * Returns a copy of the value this ValueSource holds and resolves templates if there're any.
-     *
+     * <p>
     * For immutable values only a copy of the reference to the value is made.
     *
     * @param model The model to be used when resolving any templates
diff --git a/server/src/main/java/org/opensearch/monitor/os/OsProbe.java b/server/src/main/java/org/opensearch/monitor/os/OsProbe.java
index 98229941252ba..a0a14372aa31a 100644
--- a/server/src/main/java/org/opensearch/monitor/os/OsProbe.java
+++ b/server/src/main/java/org/opensearch/monitor/os/OsProbe.java
@@ -59,13 +59,13 @@
 /**
 * The {@link OsProbe} class retrieves information about the physical and swap size of the machine
 * memory, as well as the system load average and cpu load.
- *
+ * <p>
 * In some exceptional cases, it's possible the underlying native methods used by
 * {@link #getFreePhysicalMemorySize()}, {@link #getTotalPhysicalMemorySize()},
 * {@link #getFreeSwapSpaceSize()}, and {@link #getTotalSwapSpaceSize()} can return a
 * negative value. Because of this, we prevent those methods from returning negative values,
 * returning 0 instead.
- *
+ * <p>
 * The OS can report a negative number in a number of cases:
 * - Non-supported OSes (HP-UX, or AIX)
 * - A failure of macOS to initialize host statistics
@@ -183,11 +183,11 @@ public long getTotalSwapSpaceSize() {
    /**
     * The system load averages as an array.
-     *
+     * <p>
     * On Windows, this method returns {@code null}.
-     *
+     * <p>
     * On Linux, this method returns the 1, 5, and 15-minute load averages.
-     *
+     * <p>
     * On macOS, this method should return the 1-minute load average.
     *
     * @return the available system load averages or {@code null}
diff --git a/server/src/main/java/org/opensearch/persistent/NodePersistentTasksExecutor.java b/server/src/main/java/org/opensearch/persistent/NodePersistentTasksExecutor.java
index 209df1e1f498d..403630b89e42a 100644
--- a/server/src/main/java/org/opensearch/persistent/NodePersistentTasksExecutor.java
+++ b/server/src/main/java/org/opensearch/persistent/NodePersistentTasksExecutor.java
@@ -37,7 +37,7 @@
 /**
 * This component is responsible for execution of persistent tasks.
- *
+ * <p>
 * It abstracts away the execution of tasks and greatly simplifies testing of PersistentTasksNodeService
 *
 * @opensearch.internal
diff --git a/server/src/main/java/org/opensearch/persistent/PersistentTasksClusterService.java b/server/src/main/java/org/opensearch/persistent/PersistentTasksClusterService.java
index 89bb23930b063..4e38fb34dbf17 100644
--- a/server/src/main/java/org/opensearch/persistent/PersistentTasksClusterService.java
+++ b/server/src/main/java/org/opensearch/persistent/PersistentTasksClusterService.java
@@ -323,7 +323,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS
    /**
     * This unassigns a task from any node, i.e. it is assigned to a {@code null} node with the provided reason.
-     *
+     * <p>
     * Since the assignment executor node is null, the {@link PersistentTasksClusterService} will attempt to reassign it to a valid
     * node quickly.
     *
diff --git a/server/src/main/java/org/opensearch/plugins/CircuitBreakerPlugin.java b/server/src/main/java/org/opensearch/plugins/CircuitBreakerPlugin.java
index 7e7345811246f..3552c8286b7a3 100644
--- a/server/src/main/java/org/opensearch/plugins/CircuitBreakerPlugin.java
+++ b/server/src/main/java/org/opensearch/plugins/CircuitBreakerPlugin.java
@@ -46,9 +46,9 @@ public interface CircuitBreakerPlugin {
    /**
     * Each of the factory functions are passed to the configured {@link CircuitBreakerService}.
-     *
+     * <p>
     * The service then constructs a {@link CircuitBreaker} given the resulting {@link BreakerSettings}.
-     *
+     * <p>
     * Custom circuit breakers settings can be found in {@link BreakerSettings}.
     * See:
     *  - limit (example: `breaker.foo.limit`) {@link BreakerSettings#CIRCUIT_BREAKER_LIMIT_SETTING}
@@ -63,7 +63,7 @@ public interface CircuitBreakerPlugin {
    /**
     * The passed {@link CircuitBreaker} object is the same one that was constructed by the {@link BreakerSettings}
     * provided by {@link CircuitBreakerPlugin#getCircuitBreaker(Settings)}.
-     *
+     * <p>
     * This reference should never change throughout the lifetime of the node.
     *
     * @param circuitBreaker The constructed {@link CircuitBreaker} object from the {@link BreakerSettings}
diff --git a/server/src/main/java/org/opensearch/plugins/ClusterPlugin.java b/server/src/main/java/org/opensearch/plugins/ClusterPlugin.java
index c2e147b86d17f..1edd9f52d97a7 100644
--- a/server/src/main/java/org/opensearch/plugins/ClusterPlugin.java
+++ b/server/src/main/java/org/opensearch/plugins/ClusterPlugin.java
@@ -64,7 +64,7 @@ default Collection createAllocationDeciders(Settings settings
    /**
     * Return {@link ShardsAllocator} implementations added by this plugin.
-     *
+     * <p>
     * The key of the returned {@link Map} is the name of the allocator, and the value
     * is a function to construct the allocator.
     *
@@ -88,7 +88,7 @@ default Map getExistingShardsAllocators() {
    /**
     * Called when the node is started
     *
-     * DEPRECATED: Use {@link #onNodeStarted(DiscoveryNode)} for newer implementations.
+     * @deprecated Use {@link #onNodeStarted(DiscoveryNode)} for newer implementations.
     */
    @Deprecated
    default void onNodeStarted() {}
diff --git a/server/src/main/java/org/opensearch/plugins/DiscoveryPlugin.java b/server/src/main/java/org/opensearch/plugins/DiscoveryPlugin.java
index bca72942bd70e..63f0d826b592f 100644
--- a/server/src/main/java/org/opensearch/plugins/DiscoveryPlugin.java
+++ b/server/src/main/java/org/opensearch/plugins/DiscoveryPlugin.java
@@ -68,13 +68,10 @@ public interface DiscoveryPlugin {
     * This can be handy if you want to provide your own Network interface name like _mycard_
     * and implement by yourself the logic to get an actual IP address/hostname based on this
     * name.
-     *
+     * <p>
     * For example: you could call a third party service (an API) to resolve _mycard_.
     * Then you could define in opensearch.yml settings like:
-     *
-     * <pre>{@code
-     * network.host: _mycard_
-     * }</pre>
+     * {@code network.host: _mycard_ }
     */
    default NetworkService.CustomNameResolver getCustomNameResolver(Settings settings) {
        return null;
@@ -82,7 +79,7 @@ default NetworkService.CustomNameResolver getCustomNameResolver(Settings setting
    /**
     * Returns providers of seed hosts for discovery.
-     *
+     * <p>
     * The key of the returned map is the name of the host provider
     * (see {@link org.opensearch.discovery.DiscoveryModule#DISCOVERY_SEED_PROVIDERS_SETTING}), and
     * the value is a supplier to construct the host provider when it is selected for use.
diff --git a/server/src/main/java/org/opensearch/plugins/EnginePlugin.java b/server/src/main/java/org/opensearch/plugins/EnginePlugin.java
index 92ae2ff9cd661..5ea5442d84ffa 100644
--- a/server/src/main/java/org/opensearch/plugins/EnginePlugin.java
+++ b/server/src/main/java/org/opensearch/plugins/EnginePlugin.java
@@ -92,7 +92,7 @@ default Optional getCustomCodecServiceFactory(IndexSettings
     * When an index is created this method is invoked for each engine plugin. Engine plugins that need to provide a
     * custom {@link TranslogDeletionPolicy} can override this method to return a function that takes the {@link IndexSettings}
     * and a {@link Supplier} for {@link RetentionLeases} and returns a custom {@link TranslogDeletionPolicy}.
-     *
+     * <p>
     * Only one of the installed Engine plugins can override this otherwise {@link IllegalStateException} will be thrown.
     *
     * @return a function that returns an instance of {@link TranslogDeletionPolicy}
diff --git a/server/src/main/java/org/opensearch/plugins/ExtensiblePlugin.java b/server/src/main/java/org/opensearch/plugins/ExtensiblePlugin.java
index 4dd4010383934..367d335ac4fea 100644
--- a/server/src/main/java/org/opensearch/plugins/ExtensiblePlugin.java
+++ b/server/src/main/java/org/opensearch/plugins/ExtensiblePlugin.java
@@ -36,7 +36,7 @@
 /**
 * An extension point for {@link Plugin} implementations to be themselves extensible.
- *
+ * <p>
 * This class provides a callback for extensible plugins to be informed of other plugins
 * which extend them.
 *
@@ -62,7 +62,7 @@ interface ExtensionLoader {
    /**
     * Allow this plugin to load extensions from other plugins.
-     *
+     * <p>
     * This method is called once only, after initializing this plugin and all plugins extending this plugin. It is called before
     * any other methods on this Plugin instance are called.
     */
diff --git a/server/src/main/java/org/opensearch/plugins/IdentityPlugin.java b/server/src/main/java/org/opensearch/plugins/IdentityPlugin.java
index 00f3f8aff585c..410535504f0dd 100644
--- a/server/src/main/java/org/opensearch/plugins/IdentityPlugin.java
+++ b/server/src/main/java/org/opensearch/plugins/IdentityPlugin.java
@@ -19,16 +19,14 @@ public interface IdentityPlugin {
    /**
-     * Get the current subject
-     *
-     * Should never return null
+     * Get the current subject.
+     * @return Should never return null
     * */
    public Subject getSubject();

    /**
     * Get the Identity Plugin's token manager implementation
-     *
-     * Should never return null
+     * @return Should never return null.
     */
    public TokenManager getTokenManager();
 }
diff --git a/server/src/main/java/org/opensearch/plugins/IngestPlugin.java b/server/src/main/java/org/opensearch/plugins/IngestPlugin.java
index dfc11aa8eea55..dc4f22de71344 100644
--- a/server/src/main/java/org/opensearch/plugins/IngestPlugin.java
+++ b/server/src/main/java/org/opensearch/plugins/IngestPlugin.java
@@ -46,7 +46,7 @@ public interface IngestPlugin {
    /**
     * Returns additional ingest processor types added by this plugin.
-     *
+     * <p>
     * The key of the returned {@link Map} is the unique name for the processor which is specified
     * in pipeline configurations, and the value is a {@link org.opensearch.ingest.Processor.Factory}
     * to create the processor from a given pipeline configuration.
diff --git a/server/src/main/java/org/opensearch/plugins/Plugin.java b/server/src/main/java/org/opensearch/plugins/Plugin.java
index 0743cd3807eff..48486a6b55dfd 100644
--- a/server/src/main/java/org/opensearch/plugins/Plugin.java
+++ b/server/src/main/java/org/opensearch/plugins/Plugin.java
@@ -121,7 +121,7 @@ public Collection<Class<? extends LifecycleComponent>> getGuiceServiceClasses()
    /**
     * Returns components added by this plugin.
-     *
+     * <p>
     * Any components returned that implement {@link LifecycleComponent} will have their lifecycle managed.
     * Note: To aid in the migration away from guice, all objects returned as components will be bound in guice
     * to themselves.
diff --git a/server/src/main/java/org/opensearch/plugins/PluginsService.java b/server/src/main/java/org/opensearch/plugins/PluginsService.java
index 47195a0264750..cc9cc5b5b5fbf 100644
--- a/server/src/main/java/org/opensearch/plugins/PluginsService.java
+++ b/server/src/main/java/org/opensearch/plugins/PluginsService.java
@@ -468,7 +468,7 @@ private static Bundle readPluginBundle(final Set<Bundle> bundles, final Path plu
    /**
     * Return the given bundles, sorted in dependency loading order.
-     *
+     * <p>
     * This sort is stable, so that if two plugins do not have any interdependency,
     * their relative order from iteration of the provided set will not change.
     *
diff --git a/server/src/main/java/org/opensearch/plugins/RepositoryPlugin.java b/server/src/main/java/org/opensearch/plugins/RepositoryPlugin.java
index 189ba3cfc16ab..09233d49f3aea 100644
--- a/server/src/main/java/org/opensearch/plugins/RepositoryPlugin.java
+++ b/server/src/main/java/org/opensearch/plugins/RepositoryPlugin.java
@@ -52,9 +52,8 @@ public interface RepositoryPlugin {
     * Returns repository types added by this plugin.
     *
     * @param env The environment for the local node, which may be used for the local settings and path.repo
-     *
-     * The key of the returned {@link Map} is the type name of the repository and
-     * the value is a factory to construct the {@link Repository} interface.
+     *            The key of the returned {@link Map} is the type name of the repository and
+     *            the value is a factory to construct the {@link Repository} interface.
     */
    default Map<String, Repository.Factory> getRepositories(
        Environment env,
@@ -70,9 +69,8 @@ default Map<String, Repository.Factory> getRepositories(
     * through the external API.
     *
     * @param env The environment for the local node, which may be used for the local settings and path.repo
-     *
-     * The key of the returned {@link Map} is the type name of the repository and
-     * the value is a factory to construct the {@link Repository} interface.
+     *            The key of the returned {@link Map} is the type name of the repository and
+     *            the value is a factory to construct the {@link Repository} interface.
     */
    default Map<String, Repository.Factory> getInternalRepositories(
        Environment env,
diff --git a/server/src/main/java/org/opensearch/plugins/SearchPipelinePlugin.java b/server/src/main/java/org/opensearch/plugins/SearchPipelinePlugin.java
index d2ef2b65c5944..7288a8caaec58 100644
--- a/server/src/main/java/org/opensearch/plugins/SearchPipelinePlugin.java
+++ b/server/src/main/java/org/opensearch/plugins/SearchPipelinePlugin.java
@@ -35,7 +35,7 @@ public interface SearchPipelinePlugin {
    /**
     * Returns additional search pipeline request processor types added by this plugin.
-     *
+     * <p>
     * The key of the returned {@link Map} is the unique name for the processor which is specified
     * in pipeline configurations, and the value is a {@link org.opensearch.search.pipeline.Processor.Factory}
     * to create the processor from a given pipeline configuration.
@@ -46,7 +46,7 @@ default Map<String, Processor.Factory<SearchRequestProcessor>> getRequestProcess
    /**
     * Returns additional search pipeline response processor types added by this plugin.
-     *
+     * <p>
     * The key of the returned {@link Map} is the unique name for the processor which is specified
     * in pipeline configurations, and the value is a {@link org.opensearch.search.pipeline.Processor.Factory}
     * to create the processor from a given pipeline configuration.
@@ -57,7 +57,7 @@ default Map<String, Processor.Factory<SearchResponseProcessor>> getResponseProce
    /**
     * Returns additional search pipeline search phase results processor types added by this plugin.
-     *
+     * <p>
     * The key of the returned {@link Map} is the unique name for the processor which is specified
     * in pipeline configurations, and the value is a {@link org.opensearch.search.pipeline.Processor.Factory}
     * to create the processor from a given pipeline configuration.
diff --git a/server/src/main/java/org/opensearch/plugins/SearchPlugin.java b/server/src/main/java/org/opensearch/plugins/SearchPlugin.java
index 8b5920e1f4485..40b4f97cd1897 100644
--- a/server/src/main/java/org/opensearch/plugins/SearchPlugin.java
+++ b/server/src/main/java/org/opensearch/plugins/SearchPlugin.java
@@ -551,8 +551,8 @@ class CompositeAggregationSpec {
        private final Consumer<ValuesSourceRegistry.Builder> aggregatorRegistrar;
        private final Class<?> valueSourceBuilderClass;
        @Deprecated
-        /** This is added for backward compatibility, you don't need to set it, as we use aggregationType instead of
-         * byte code
+        /* This is added for backward compatibility, you don't need to set it, as we use aggregationType instead of
+           byte code
         */
        private Byte byteCode;
        private final CompositeAggregationParsingFunction parsingFunction;
diff --git a/server/src/main/java/org/opensearch/repositories/IndexId.java b/server/src/main/java/org/opensearch/repositories/IndexId.java
index 0b0644ce932e5..d88eeb5aa9c49 100644
--- a/server/src/main/java/org/opensearch/repositories/IndexId.java
+++ b/server/src/main/java/org/opensearch/repositories/IndexId.java
@@ -80,7 +80,7 @@ public String getName() {
     * The unique ID for the index within the repository. This is *not* the same as the
     * index's UUID, but merely a unique file/URL friendly identifier that a repository can
     * use to name blobs for the index.
-     *
+     * <p>
     * We could not use the index's actual UUID (See {@link Index#getUUID()}) because in the
     * case of snapshot/restore, the index UUID in the snapshotted index will be different
     * from the index UUID assigned to it when it is restored. Hence, the actual index UUID
diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java
index 8e6649c366eeb..69883e0d19c8d 100644
--- a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java
+++ b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java
@@ -2547,7 +2547,7 @@ private RepositoryMetadata getRepoMetadata(ClusterState state) {
     * the next version number from when the index blob was written. Each individual index-N blob is
     * only written once and never overwritten. The highest numbered index-N blob is the latest one
     * that contains the current snapshots in the repository.
-     *
+     * <p>
     * Package private for testing
     */
    long latestIndexBlobId() throws IOException {
diff --git a/server/src/main/java/org/opensearch/rest/RestController.java b/server/src/main/java/org/opensearch/rest/RestController.java
index ac30f999d0da7..cc48b59699a17 100644
--- a/server/src/main/java/org/opensearch/rest/RestController.java
+++ b/server/src/main/java/org/opensearch/rest/RestController.java
@@ -524,7 +524,7 @@ private void handleBadRequest(String uri, RestRequest.Method method, RestChannel
    /**
     * Attempts to extract auth token and login.
     *
-     * @returns false if there was an error and the request should not continue being dispatched
+     * @return false if there was an error and the request should not continue being dispatched
     * */
    private boolean handleAuthenticateUser(final RestRequest request, final RestChannel channel) {
        try {
diff --git a/server/src/main/java/org/opensearch/script/Script.java b/server/src/main/java/org/opensearch/script/Script.java
index a611e71c3bf3f..9e74314c281cd 100644
--- a/server/src/main/java/org/opensearch/script/Script.java
+++ b/server/src/main/java/org/opensearch/script/Script.java
@@ -70,9 +70,9 @@
 * {@link Script} represents used-defined input that can be used to
 * compile and execute a script from the {@link ScriptService}
 * based on the {@link ScriptType}.
- *
+ * <p>
 * There are three types of scripts specified by {@link ScriptType}.
- *
+ * <p>
 * The following describes the expected parameters for each type of script:
 *
 * <ul>
@@ -348,16 +348,16 @@ public static Script parse(Settings settings) {
    /**
     * This will parse XContent into a {@link Script}. The following formats can be parsed:
-     *
+     * <p>
     * The simple format defaults to an {@link ScriptType#INLINE} with no compiler options or user-defined params:
-     *
+     * <p>
     * Example:
     * {@code
     * "return Math.log(doc.popularity) * 100;"
     * }
     *
     * The complex format where {@link ScriptType} and idOrCode are required while lang, options and params are not required.
-     *
+     * <p>
     * {@code
     * {
    *     // Exactly one of "id" or "source" must be specified
@@ -391,7 +391,7 @@ public static Script parse(Settings settings) {
     *
     * This also handles templates in a special way. If a complexly formatted query is specified as another complex
     * JSON object the query is assumed to be a template, and the format will be preserved.
-     *
+     * <p>
     * {@code
     * {
     *     "source" : { "query" : ... },
@@ -605,7 +605,7 @@ public void writeTo(StreamOutput out) throws IOException {
    /**
     * This will build scripts into the following XContent structure:
-     *
+     * <p>
                            * {@code * { * "<(id, source)>" : "", @@ -635,10 +635,10 @@ public void writeTo(StreamOutput out) throws IOException { * } * * Note that lang, options, and params will only be included if there have been any specified. - * + *

                            * This also handles templates in a special way. If the {@link Script#CONTENT_TYPE_OPTION} option * is provided and the {@link ScriptType#INLINE} is specified then the template will be preserved as a raw field. - * + *
<p>
                            * {@code * { * "source" : { "query" : ... }, diff --git a/server/src/main/java/org/opensearch/script/ScriptCache.java b/server/src/main/java/org/opensearch/script/ScriptCache.java index 3c50fc12dcacb..fb57e7cdfa5bd 100644 --- a/server/src/main/java/org/opensearch/script/ScriptCache.java +++ b/server/src/main/java/org/opensearch/script/ScriptCache.java @@ -158,7 +158,7 @@ public ScriptContextStats stats(String context) { /** * Check whether there have been too many compilations within the last minute, throwing a circuit breaking exception if so. * This is a variant of the token bucket algorithm: https://en.wikipedia.org/wiki/Token_bucket - * + *
<p>
                            * It can be thought of as a bucket with water, every time the bucket is checked, water is added proportional to the amount of time that * elapsed since the last time it was checked. If there is enough water, some is removed and the request is allowed. If there is not * enough water the request is denied. Just like a normal bucket, if water is added that overflows the bucket, the extra water/capacity diff --git a/server/src/main/java/org/opensearch/script/ScriptContext.java b/server/src/main/java/org/opensearch/script/ScriptContext.java index 71ced303b062e..2180b6059dbef 100644 --- a/server/src/main/java/org/opensearch/script/ScriptContext.java +++ b/server/src/main/java/org/opensearch/script/ScriptContext.java @@ -40,7 +40,7 @@ /** * The information necessary to compile and run a script. - * + *
<p>
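For illustration, a minimal self-contained sketch of the token-bucket check described in the ScriptCache javadoc above; the class name, constants, and clock source are illustrative assumptions, not the actual ScriptCache code:

public class CompileRateLimiter {
    private final double maxTokens;     // bucket capacity, e.g. 75 compilations
    private final double refillPerNano; // "water" added per elapsed nanosecond
    private double tokens;
    private long lastChecked;

    public CompileRateLimiter(double maxCompilations, long perNanos) {
        this.maxTokens = maxCompilations;
        this.refillPerNano = maxCompilations / perNanos;
        this.tokens = maxCompilations;
        this.lastChecked = System.nanoTime();
    }

    // Refill proportionally to the elapsed time, cap at the bucket size (overflow is lost),
    // then take one token if available; otherwise the compilation is rejected.
    public synchronized boolean tryCompile() {
        long now = System.nanoTime();
        tokens = Math.min(maxTokens, tokens + (now - lastChecked) * refillPerNano);
        lastChecked = now;
        if (tokens >= 1.0) {
            tokens -= 1.0;
            return true;
        }
        return false;
    }
}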
                            * A {@link ScriptContext} contains the information related to a single use case and the interfaces * and methods necessary for a {@link ScriptEngine} to implement. *

                            diff --git a/server/src/main/java/org/opensearch/script/ScriptLanguagesInfo.java b/server/src/main/java/org/opensearch/script/ScriptLanguagesInfo.java index ef64959f99a3c..30756ff702e8f 100644 --- a/server/src/main/java/org/opensearch/script/ScriptLanguagesInfo.java +++ b/server/src/main/java/org/opensearch/script/ScriptLanguagesInfo.java @@ -56,11 +56,11 @@ /** * The allowable types, languages and their corresponding contexts. When serialized there is a top level types_allowed list, * meant to reflect the setting script.allowed_types with the allowed types (eg inline, stored). - * + *
<p>
                            * The top-level language_contexts list of objects have the language (eg. painless, * mustache) and a list of contexts available for the language. It is the responsibility of the caller to ensure * these contexts are filtered by the script.allowed_contexts setting. - * + *
<p>
                            * The json serialization of the object has the form: * * { diff --git a/server/src/main/java/org/opensearch/script/ScriptMetadata.java b/server/src/main/java/org/opensearch/script/ScriptMetadata.java index 5f529fccd213c..fd92d8f7f02db 100644 --- a/server/src/main/java/org/opensearch/script/ScriptMetadata.java +++ b/server/src/main/java/org/opensearch/script/ScriptMetadata.java @@ -183,9 +183,9 @@ static ScriptMetadata deleteStoredScript(ScriptMetadata previous, String id) { /** * This will parse XContent into {@link ScriptMetadata}. - * + *
<p>
                            * The following format will be parsed: - * + *
<p>
                            * {@code * { * "" : "<{@link StoredScriptSource#fromXContent(XContentParser, boolean)}>", @@ -356,7 +356,7 @@ public void writeTo(StreamOutput out) throws IOException { /** * This will write XContent from {@link ScriptMetadata}. The following format will be written: - * + *
<p>
                            * {@code * { * "" : "<{@link StoredScriptSource#toXContent(XContentBuilder, Params)}>", diff --git a/server/src/main/java/org/opensearch/script/StoredScriptSource.java b/server/src/main/java/org/opensearch/script/StoredScriptSource.java index 0ff44b3af890a..d1dae67d0e55f 100644 --- a/server/src/main/java/org/opensearch/script/StoredScriptSource.java +++ b/server/src/main/java/org/opensearch/script/StoredScriptSource.java @@ -308,7 +308,7 @@ public static StoredScriptSource parse(BytesReference content, MediaType mediaTy /** * This will parse XContent into a {@link StoredScriptSource}. The following format is what will be parsed: - * + *
<p>
                            * {@code * { * "script" : { @@ -387,7 +387,7 @@ public void writeTo(StreamOutput out) throws IOException { /** * This will write XContent from a {@link StoredScriptSource}. The following format will be written: - * + *
<p>
                            * {@code * { * "script" : { diff --git a/server/src/main/java/org/opensearch/search/MultiValueMode.java b/server/src/main/java/org/opensearch/search/MultiValueMode.java index 0fbd41f062710..d812f1290efe3 100644 --- a/server/src/main/java/org/opensearch/search/MultiValueMode.java +++ b/server/src/main/java/org/opensearch/search/MultiValueMode.java @@ -542,7 +542,7 @@ public static MultiValueMode fromString(String sortMode) { * Return a {@link NumericDocValues} instance that can be used to sort documents * with this mode and the provided values. When a document has no value, * missingValue is returned. - * + *
<p>
                            * Allowed Modes: SUM, AVG, MEDIAN, MIN, MAX */ public NumericDocValues select(final SortedNumericDocValues values) { @@ -583,12 +583,12 @@ protected long pick(SortedNumericDocValues values) throws IOException { /** * Return a {@link NumericDocValues} instance that can be used to sort root documents * with this mode, the provided values and filters for root/inner documents. - * + *
<p>
                            * For every root document, the values of its inner documents will be aggregated. * If none of the inner documents has a value, then missingValue is returned. - * + *
<p>
                            * Allowed Modes: SUM, AVG, MIN, MAX - * + *
<p>
                            * NOTE: Calling the returned instance on docs that are not root docs is illegal * The returned instance can only be evaluate the current and upcoming docs */ @@ -658,7 +658,7 @@ protected long pick( * Return a {@link NumericDoubleValues} instance that can be used to sort documents * with this mode and the provided values. When a document has no value, * missingValue is returned. - * + *
<p>
                            * Allowed Modes: SUM, AVG, MEDIAN, MIN, MAX */ public NumericDoubleValues select(final SortedNumericDoubleValues values) { @@ -694,12 +694,12 @@ protected double pick(SortedNumericDoubleValues values) throws IOException { /** * Return a {@link NumericDoubleValues} instance that can be used to sort root documents * with this mode, the provided values and filters for root/inner documents. - * + *
<p>
                            * For every root document, the values of its inner documents will be aggregated. * If none of the inner documents has a value, then missingValue is returned. - * + *
<p>
                            * Allowed Modes: SUM, AVG, MIN, MAX - * + *
<p>
                            * NOTE: Calling the returned instance on docs that are not root docs is illegal * The returned instance can only be evaluate the current and upcoming docs */ @@ -761,7 +761,7 @@ protected double pick( * Return a {@link BinaryDocValues} instance that can be used to sort documents * with this mode and the provided values. When a document has no value, * missingValue is returned. - * + *
<p>
                            * Allowed Modes: MIN, MAX */ public BinaryDocValues select(final SortedBinaryDocValues values, final BytesRef missingValue) { @@ -816,12 +816,12 @@ protected BytesRef pick(SortedBinaryDocValues values) throws IOException { /** * Return a {@link BinaryDocValues} instance that can be used to sort root documents * with this mode, the provided values and filters for root/inner documents. - * + *
<p>
                            * For every root document, the values of its inner documents will be aggregated. * If none of the inner documents has a value, then missingValue is returned. - * + *
<p>
                            * Allowed Modes: MIN, MAX - * + *
<p>
                            * NOTE: Calling the returned instance on docs that are not root docs is illegal * The returned instance can only be evaluate the current and upcoming docs */ @@ -889,7 +889,7 @@ protected BytesRef pick( /** * Return a {@link SortedDocValues} instance that can be used to sort documents * with this mode and the provided values. - * + *
<p>
                            * Allowed Modes: MIN, MAX */ public SortedDocValues select(final SortedSetDocValues values) { @@ -949,11 +949,11 @@ protected int pick(SortedSetDocValues values) throws IOException { /** * Return a {@link SortedDocValues} instance that can be used to sort root documents * with this mode, the provided values and filters for root/inner documents. - * + *
<p>
                            * For every root document, the values of its inner documents will be aggregated. - * + *
<p>
                            * Allowed Modes: MIN, MAX - * + *
<p>
                            * NOTE: Calling the returned instance on docs that are not root docs is illegal * The returned instance can only be evaluate the current and upcoming docs */ diff --git a/server/src/main/java/org/opensearch/search/SearchExtBuilder.java b/server/src/main/java/org/opensearch/search/SearchExtBuilder.java index 4d86c6c2e2277..1df58666f6fdb 100644 --- a/server/src/main/java/org/opensearch/search/SearchExtBuilder.java +++ b/server/src/main/java/org/opensearch/search/SearchExtBuilder.java @@ -47,10 +47,10 @@ * Any state needs to be serialized as part of the {@link Writeable#writeTo(StreamOutput)} method and * read from the incoming stream, usually done adding a constructor that takes {@link StreamInput} as * an argument. - * + *
<p>
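To make the repeated "Allowed Modes" javadocs above concrete, here is the selection logic reduced to a plain sorted array of one document's values; the real MultiValueMode works against Lucene doc-values iterators, so this is only an illustration:

static double pick(double[] sortedValues, String mode, double missingValue) {
    int n = sortedValues.length;
    if (n == 0) {
        return missingValue;                  // a document with no value gets missingValue
    }
    double sum = 0;
    for (double v : sortedValues) {
        sum += v;
    }
    switch (mode) {
        case "MIN":    return sortedValues[0];
        case "MAX":    return sortedValues[n - 1];
        case "SUM":    return sum;
        case "AVG":    return sum / n;
        case "MEDIAN": return (sortedValues[(n - 1) / 2] + sortedValues[n / 2]) / 2;
        default:       throw new IllegalArgumentException("unsupported mode: " + mode);
    }
}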
                            * Registration happens through {@link SearchPlugin#getSearchExts()}, which also needs a {@link CheckedFunction} that's able to parse * the incoming request from the REST layer into the proper {@link SearchExtBuilder} subclass. - * + *
<p>
                            * {@link #getWriteableName()} must return the same name as the one used for the registration * of the {@link SearchExtSpec}. * diff --git a/server/src/main/java/org/opensearch/search/SearchHit.java b/server/src/main/java/org/opensearch/search/SearchHit.java index 49f1cc585fd80..5649ec383a9e9 100644 --- a/server/src/main/java/org/opensearch/search/SearchHit.java +++ b/server/src/main/java/org/opensearch/search/SearchHit.java @@ -1035,7 +1035,7 @@ public int getOffset() { /** * Returns the next child nested level if there is any, otherwise null is returned. - * + *
<p>
                            * In the case of mappings with multiple levels of nested object fields */ public NestedIdentity getChild() { diff --git a/server/src/main/java/org/opensearch/search/SearchModule.java b/server/src/main/java/org/opensearch/search/SearchModule.java index 0f1030b87c036..62d397de58187 100644 --- a/server/src/main/java/org/opensearch/search/SearchModule.java +++ b/server/src/main/java/org/opensearch/search/SearchModule.java @@ -327,7 +327,7 @@ public class SearchModule { /** * Constructs a new SearchModule object - * + *
<p>
                            * NOTE: This constructor should not be called in production unless an accurate {@link Settings} object is provided. * When constructed, a static flag is set in Lucene {@link BooleanQuery#setMaxClauseCount} according to the settings. * @param settings Current settings diff --git a/server/src/main/java/org/opensearch/search/aggregations/AggregatorBase.java b/server/src/main/java/org/opensearch/search/aggregations/AggregatorBase.java index 392c65ce27aea..47e9def094623 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/AggregatorBase.java +++ b/server/src/main/java/org/opensearch/search/aggregations/AggregatorBase.java @@ -135,7 +135,7 @@ public ScoreMode scoreMode() { * Returns a converter for point values if it's safe to use the indexed data instead of * doc values. Generally, this means that the query has no filters or scripts, the aggregation is * top level, and the underlying field is indexed, and the index is sorted in the right order. - * + *
<p>
                            * If those conditions aren't met, return null to indicate a point reader cannot * be used in this case. * diff --git a/server/src/main/java/org/opensearch/search/aggregations/InternalAggregations.java b/server/src/main/java/org/opensearch/search/aggregations/InternalAggregations.java index e4d64e1e8517c..288c7a64ba795 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/InternalAggregations.java +++ b/server/src/main/java/org/opensearch/search/aggregations/InternalAggregations.java @@ -125,7 +125,7 @@ public double sortValue(AggregationPath.PathElement head, Iterator * This method first reduces the aggregations, and if it is the final reduce, then reduce the pipeline * aggregations (both embedded parent/sibling as well as top-level sibling pipelines) */ diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/BucketsAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/BucketsAggregator.java index 67af0b13eed3b..eef427754f535 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/BucketsAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/BucketsAggregator.java @@ -132,7 +132,7 @@ public final void collectExistingBucket(LeafBucketCollector subCollector, int do /** * This only tidies up doc counts. Call {@link MergingBucketsDeferringCollector#mergeBuckets(long[])} to merge the actual * ordinals and doc ID deltas. - * + *
<p>
                            * Refer to that method for documentation about the merge map. * * @deprecated use {@link mergeBuckets(long, LongUnaryOperator)} @@ -146,7 +146,7 @@ public final void mergeBuckets(long[] mergeMap, long newNumBuckets) { * * @param mergeMap a unary operator which maps a bucket's ordinal to the ordinal it should be merged with. * If a bucket's ordinal is mapped to -1 then the bucket is removed entirely. - * + *
<p>
                            * This only tidies up doc counts. Call {@link MergingBucketsDeferringCollector#mergeBuckets(LongUnaryOperator)} to * merge the actual ordinals and doc ID deltas. */ diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/GeoTileUtils.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/GeoTileUtils.java index de74055bb94f3..dfb8f6be7155d 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/GeoTileUtils.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/GeoTileUtils.java @@ -101,7 +101,7 @@ private GeoTileUtils() {} /** * Parse an integer precision (zoom level). The {@link ValueType#INT} allows it to be a number or a string. - * + *
<p>
                            * The precision is expressed as a zoom level between 0 and {@link #MAX_ZOOM} (inclusive). * * @param parser {@link XContentParser} to parse the value from diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/MergingBucketsDeferringCollector.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/MergingBucketsDeferringCollector.java index 8d35c1edc8cb0..3e9424eda92a9 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/MergingBucketsDeferringCollector.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/MergingBucketsDeferringCollector.java @@ -55,16 +55,17 @@ public MergingBucketsDeferringCollector(SearchContext context, boolean isGlobal) /** * Merges/prunes the existing bucket ordinals and docDeltas according to the provided mergeMap. - * + *
<p>
                            * The mergeMap is an array where the index position represents the current bucket ordinal, and * the value at that position represents the ordinal the bucket should be merged with. If * the value is set to -1 it is removed entirely. - * + *
<p>
                            * For example, if the mergeMap [1,1,3,-1,3] is provided: - * - Buckets `0` and `1` will be merged to bucket ordinal `1` - * - Bucket `2` and `4` will be merged to ordinal `3` - * - Bucket `3` will be removed entirely - * + *
<p>
+ * <ul>
+ * <li>Buckets `0` and `1` will be merged to bucket ordinal `1`</li>
+ * <li>Bucket `2` and `4` will be merged to ordinal `3`</li>
+ * <li>Bucket `3` will be removed entirely</li>
+ * </ul>
                            * This process rebuilds the ordinals and docDeltas according to the mergeMap, so it should * not be called unless there are actually changes to be made, to avoid unnecessary work. * @@ -80,7 +81,7 @@ public void mergeBuckets(long[] mergeMap) { * * @param mergeMap a unary operator which maps a bucket's ordinal to the ordinal it should be merged with. * If a bucket's ordinal is mapped to -1 then the bucket is removed entirely. - * + *
<p>
                            * This process rebuilds the ordinals and docDeltas according to the mergeMap, so it should * not be called unless there are actually changes to be made, to avoid unnecessary work. */ diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java index 366de8619fd55..fd94ba355238a 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java @@ -216,7 +216,7 @@ public DateHistogramValuesSourceBuilder dateHistogramInterval(DateHistogramInter /** * Sets the interval of the DateHistogram using calendar units (`1d`, `1w`, `1M`, etc). These units * are calendar-aware, meaning they respect leap additions, variable days per month, etc. - * + *
<p>
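A standalone sketch of how the array-style mergeMap described above could be applied to per-bucket doc counts, matching the [1,1,3,-1,3] example; the method and array names are illustrative, not the collector's internals:

static long[] applyMergeMap(long[] mergeMap, long[] docCounts) {
    int newNumBuckets = 0;
    for (long target : mergeMap) {
        if (target >= 0) {
            newNumBuckets = Math.max(newNumBuckets, (int) target + 1);
        }
    }
    long[] merged = new long[newNumBuckets];
    for (int ord = 0; ord < mergeMap.length; ord++) {
        long target = mergeMap[ord];
        if (target == -1) {
            continue; // the bucket is removed entirely
        }
        merged[(int) target] += docCounts[ord]; // ordinals mapped to the same target are combined
    }
    return merged;
}

With mergeMap = {1, 1, 3, -1, 3}, the counts of buckets 0 and 1 end up in ordinal 1, buckets 2 and 4 in ordinal 3, and bucket 3 is dropped.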
                            * This is mutually exclusive with {@link DateHistogramValuesSourceBuilder#fixedInterval(DateHistogramInterval)} * * @param interval The calendar interval to use with the aggregation @@ -229,7 +229,7 @@ public DateHistogramValuesSourceBuilder calendarInterval(DateHistogramInterval i /** * Sets the interval of the DateHistogram using fixed units (`1ms`, `1s`, `10m`, `4h`, etc). These are * not calendar aware and are simply multiples of fixed, SI units. - * + *
<p>
                            * This is mutually exclusive with {@link DateHistogramValuesSourceBuilder#calendarInterval(DateHistogramInterval)} * * @param interval The fixed interval to use with the aggregation diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/InternalComposite.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/InternalComposite.java index 27619044d8995..9f8a4cff5f3fc 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/InternalComposite.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/InternalComposite.java @@ -142,10 +142,10 @@ public String getWriteableName() { @Override public InternalComposite create(List newBuckets) { - /** - * This is used by pipeline aggregations to filter/remove buckets so we - * keep the afterKey of the original aggregation in order - * to be able to retrieve the next page even if all buckets have been filtered. + /* + This is used by pipeline aggregations to filter/remove buckets so we + keep the afterKey of the original aggregation in order + to be able to retrieve the next page even if all buckets have been filtered. */ return new InternalComposite( name, @@ -473,8 +473,8 @@ public int compareKey(InternalBucket other) { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - /** - * See {@link CompositeAggregation#bucketToXContent} + /* + See {@link CompositeAggregation#bucketToXContent} */ throw new UnsupportedOperationException("not implemented"); } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/ParsedComposite.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/ParsedComposite.java index 8382b191025fe..2a1544e218f2c 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/ParsedComposite.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/ParsedComposite.java @@ -70,9 +70,9 @@ public static ParsedComposite fromXContent(XContentParser parser, String name) t ParsedComposite aggregation = PARSER.parse(parser, null); aggregation.setName(name); if (aggregation.afterKey == null && aggregation.getBuckets().size() > 0) { - /** - * Previous versions (< 6.3) don't send afterKey - * in the response so we set it as the last returned buckets. + /* + Previous versions (< 6.3) don't send afterKey + in the response so we set it as the last returned buckets. 
*/ aggregation.setAfterKey(aggregation.getBuckets().get(aggregation.getBuckets().size() - 1).key); } @@ -130,8 +130,8 @@ void setKey(Map key) { @Override public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - /** - * See {@link CompositeAggregation#bucketToXContent} + /* + See {@link CompositeAggregation#bucketToXContent} */ throw new UnsupportedOperationException("not implemented"); } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/SortedDocsProducer.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/SortedDocsProducer.java index bd0a4f13ddf08..9442529bf9342 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/SortedDocsProducer.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/SortedDocsProducer.java @@ -62,7 +62,7 @@ abstract class SortedDocsProducer { * Visits all non-deleted documents in iterator and fills the provided queue * with the top composite buckets extracted from the collection. * Documents that contain a top composite bucket are added in the provided builder if it is not null. - * + *
<p>
                            * Returns true if the queue is full and the current leadSourceBucket did not produce any competitive * composite buckets. */ diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/FilterAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/FilterAggregatorFactory.java index 55c841f5b9c04..a0a636c121e12 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/FilterAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/FilterAggregatorFactory.java @@ -75,7 +75,7 @@ public FilterAggregatorFactory( * necessary. This is done lazily so that the {@link Weight} is only created * if the aggregation collects documents reducing the overhead of the * aggregation in the case where no documents are collected. - * + *
<p>
                            * Note that as aggregations are initialsed and executed in a serial manner, * no concurrency considerations are necessary here. */ diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/FiltersAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/FiltersAggregatorFactory.java index 35d968b789a21..a8e157a1cbb79 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/FiltersAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/FiltersAggregatorFactory.java @@ -92,7 +92,7 @@ public FiltersAggregatorFactory( * necessary. This is done lazily so that the {@link Weight}s are only * created if the aggregation collects documents reducing the overhead of * the aggregation in the case where no documents are collected. - * + *
<p>
* Note: With concurrent segment search use case, multiple aggregation collectors executing * on different threads will try to fetch the weights. To handle the race condition there is * a synchronization block diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java index d7f89225524c0..b4f1e78f77aaf 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java @@ -546,7 +546,7 @@ private int collectValue(long owningBucketOrd, int roundingIdx, int doc, long ro /** * Increase the rounding of {@code owningBucketOrd} using - * estimated, bucket counts, {@link #rebucket() rebucketing} the all + * estimated, bucket counts, {@link FromMany#rebucket() rebucketing} the all * buckets if the estimated number of wasted buckets is too high. */ private int increaseRoundingIfNeeded(long owningBucketOrd, int oldEstimatedBucketCount, long newKey, int oldRounding) { diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java index 181fb468f3356..a978b5cfa3e3e 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java @@ -245,7 +245,7 @@ public DateHistogramAggregationBuilder dateHistogramInterval(DateHistogramInterv /** * Sets the interval of the DateHistogram using calendar units (`1d`, `1w`, `1M`, etc). These units * are calendar-aware, meaning they respect leap additions, variable days per month, etc. - * + *
<p>
                            * This is mutually exclusive with {@link DateHistogramAggregationBuilder#fixedInterval(DateHistogramInterval)} * * @param interval The calendar interval to use with the aggregation @@ -258,7 +258,7 @@ public DateHistogramAggregationBuilder calendarInterval(DateHistogramInterval in /** * Sets the interval of the DateHistogram using fixed units (`1ms`, `1s`, `10m`, `4h`, etc). These are * not calendar aware and are simply multiples of fixed, SI units. - * + *
<p>
                            * This is mutually exclusive with {@link DateHistogramAggregationBuilder#calendarInterval(DateHistogramInterval)} * * @param interval The fixed interval to use with the aggregation diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java index 8233c3d995dda..f602eea7a9b12 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java @@ -216,9 +216,7 @@ public void collectDebugInfo(BiConsumer add) { } /** - * Returns the size of the bucket in specified units. - * - * If unitSize is null, returns 1.0 + * @return the size of the bucket in specified units, or 1.0 if unitSize is null */ @Override public double bucketSize(long bucket, Rounding.DateTimeUnit unitSize) { diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramInterval.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramInterval.java index 8f8e71f3cd685..9f907bcacadf9 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramInterval.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramInterval.java @@ -129,7 +129,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws * Converts this DateHistogramInterval into a millisecond representation. If this is a calendar * interval, it is an approximation of milliseconds based on the fixed equivalent (e.g. `1h` is treated as 60 * fixed minutes, rather than the hour at a specific point in time. - * + *
<p>
                            * This is merely a convenience helper for quick comparisons and should not be used for situations that * require precise durations. */ diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateIntervalConsumer.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateIntervalConsumer.java index a2c63cf25c0c2..1d21152b6f622 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateIntervalConsumer.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateIntervalConsumer.java @@ -34,7 +34,7 @@ /** * A shared interface for aggregations that parse and use "interval" parameters. - * + *
<p>
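The difference between calendar-aware and fixed intervals, and why treating `1M` as a fixed number of milliseconds is only an approximation, can be reproduced with plain java.time (a standalone demonstration, not OpenSearch code):

import java.time.Duration;
import java.time.ZoneOffset;
import java.time.ZonedDateTime;

public class IntervalDemo {
    public static void main(String[] args) {
        ZonedDateTime start = ZonedDateTime.of(2023, 1, 31, 0, 0, 0, 0, ZoneOffset.UTC);
        // Calendar-aware: one month after Jan 31 is Feb 28, because February is shorter.
        System.out.println(start.plusMonths(1));              // 2023-02-28T00:00Z
        // Fixed: thirty 24-hour days is a constant duration and lands on Mar 2.
        System.out.println(start.plus(Duration.ofDays(30)));  // 2023-03-02T00:00Z
    }
}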
                            * Provides definitions for the new fixed and calendar intervals, and deprecated * defintions for the old interval/dateHisto interval parameters * diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java index 2f8d512b7511f..83c20ba1c1d04 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java @@ -53,13 +53,13 @@ /** * A class that handles all the parsing, bwc and deprecations surrounding date histogram intervals. - * + *
<p>
                            * - Provides parser helpers for the deprecated interval/dateHistogramInterval parameters. * - Provides parser helpers for the new calendar/fixed interval parameters * - Can read old intervals from a stream and convert to new intervals * - Can write new intervals to old format when streaming out * - Provides a variety of helper methods to interpret the intervals as different types, depending on caller's need - * + *
<p>
                            * After the deprecated parameters are removed, this class can be simplified greatly. The legacy options * will be removed, and the mutual-exclusion checks can be done in the setters directly removing the need * for the enum and the complicated "state machine" logic @@ -220,7 +220,7 @@ public DateHistogramInterval getAsCalendarInterval() { /** * Sets the interval of the DateHistogram using calendar units (`1d`, `1w`, `1M`, etc). These units * are calendar-aware, meaning they respect leap additions, variable days per month, etc. - * + *
<p>
                            * This is mutually exclusive with {@link DateIntervalWrapper#fixedInterval(DateHistogramInterval)} * * @param interval The fixed interval to use @@ -250,7 +250,7 @@ public DateHistogramInterval getAsFixedInterval() { /** * Sets the interval of the DateHistogram using fixed units (`1ms`, `1s`, `10m`, `4h`, etc). These are * not calendar aware and are simply multiples of fixed, SI units. - * + *
<p>
                            * This is mutually exclusive with {@link DateIntervalWrapper#calendarInterval(DateHistogramInterval)} * * @param interval The fixed interval to use diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DoubleBounds.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DoubleBounds.java index 69c70ed1bf7fd..235ae8fcee6d7 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DoubleBounds.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DoubleBounds.java @@ -49,7 +49,7 @@ /** * Represent hard_bounds and extended_bounds in histogram aggregations. - * + *
<p>
                            * This class is similar to {@link LongBounds} used in date histograms, but is using longs to store data. LongBounds and DoubleBounds are * not used interchangeably and therefore don't share any common interfaces except for serialization. * diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogram.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogram.java index 52f689eb7c229..8ebd67bc1ebe5 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogram.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogram.java @@ -491,7 +491,7 @@ private void mergeBucketsWithPlan(List buckets, List plan, * Makes a merge plan by simulating the merging of the two closest buckets, until the target number of buckets is reached. * Distance is determined by centroid comparison. * Then, this plan is actually executed and the underlying buckets are merged. - * + *
<p>
                            * Requires: buckets is sorted by centroid. */ private void mergeBucketsIfNeeded(List buckets, int targetNumBuckets, ReduceContext reduceContext) { @@ -567,7 +567,7 @@ private void mergeBucketsWithSameMin(List buckets, ReduceContext reduceC /** * When two adjacent buckets A, B overlap (A.max > B.min) then their boundary is set to * the midpoint: (A.max + B.min) / 2. - * + *
<p>
                            * After this adjustment, A will contain more values than indicated and B will have less. */ private void adjustBoundsForOverlappingBuckets(List buckets, ReduceContext reduceContext) { diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/LongBounds.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/LongBounds.java index e138824e7fce8..ad6572916c84a 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/LongBounds.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/LongBounds.java @@ -54,7 +54,7 @@ /** * Represent hard_bounds and extended_bounds in date-histogram aggregations. - * + *
<p>
                            * This class is similar to {@link DoubleBounds} used in histograms, but is using longs to store data. LongBounds and DoubleBounds are * * not used interchangeably and therefore don't share any common interfaces except for serialization. * diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregator.java index 11ff7dbc407cb..526945243c786 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregator.java @@ -76,7 +76,7 @@ public class VariableWidthHistogramAggregator extends DeferableBucketAggregator /** * This aggregator goes through multiple phases of collection. Each phase has a different CollectionPhase::collectValue * implementation - * + *
<p>
                            * Running a clustering algorithm like K-Means is unfeasible because large indices don't fit into memory. * But having multiple collection phases lets us accurately bucket the docs in one pass. */ @@ -231,7 +231,7 @@ protected void swap(int i, int j) { * Produces a merge map where `mergeMap[i]` represents the index that values[i] * would be moved to if values were sorted * In other words, this method produces a merge map that will sort values - * + *
<p>
                            * See BucketsAggregator::mergeBuckets to learn more about the merge map */ public long[] generateMergeMap() { @@ -242,10 +242,10 @@ public long[] generateMergeMap() { /** * Sorting the documents by key lets us bucket the documents into groups with a single linear scan - * + *
<p>
                            * But we can't do this by just sorting buffer, because we also need to generate a merge map * for every change we make to the list, so that we can apply the changes to the underlying buckets as well. - * + *
<p>
                            * By just creating a merge map, we eliminate the need to actually sort buffer. We can just * use the merge map to find any doc's sorted index. */ @@ -347,7 +347,7 @@ private void createAndAppendNewCluster(double value) { /** * Move the last cluster to position idx * This is expensive because a merge map of size numClusters is created, so don't call this method too often - * + *
<p>
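A compact sketch of the idea described above: compute, for each element, the index it would occupy if the buffer were sorted, without physically sorting the buffer (names are illustrative):

import java.util.Arrays;
import java.util.Comparator;

static long[] sortingMergeMap(double[] values) {
    Integer[] order = new Integer[values.length];
    for (int i = 0; i < order.length; i++) {
        order[i] = i;
    }
    Arrays.sort(order, Comparator.comparingDouble(i -> values[i]));
    long[] mergeMap = new long[values.length];
    for (int rank = 0; rank < order.length; rank++) {
        mergeMap[order[rank]] = rank; // values[i] would move to index mergeMap[i] if sorted
    }
    return mergeMap;
}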
                            * TODO: Make this more efficient */ private void moveLastCluster(int index) { diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/SamplerAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/SamplerAggregator.java index 23e2bc6b3e54b..a886bdb3ae188 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/SamplerAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/SamplerAggregator.java @@ -55,7 +55,7 @@ /** * Aggregate on only the top-scoring docs on a shard. - * + *
<p>
                            * TODO currently the diversity feature of this agg offers only 'script' and * 'field' as a means of generating a de-dup value. In future it would be nice * if users could use any of the "bucket" aggs syntax (geo, date histogram...) diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalMultiTerms.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalMultiTerms.java index 4d1cbd4ce72f1..5b90163fa3959 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalMultiTerms.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalMultiTerms.java @@ -407,8 +407,8 @@ public int hashCode() { /** * Copy from InternalComposite - * - * Format obj using the provided {@link DocValueFormat}. + *
<p>
                            + * Format {@code obj} using the provided {@link DocValueFormat}. * If the format is equals to {@link DocValueFormat#RAW}, the object is returned as is * for numbers and a string for {@link BytesRef}s. */ diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalTerms.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalTerms.java index 341ac021f6e0c..0e773291881cf 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalTerms.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalTerms.java @@ -436,11 +436,11 @@ public InternalAggregation reduce(List aggregations, Reduce } final List reducedBuckets; - /** - * Buckets returned by a partial reduce or a shard response are sorted by key. - * That allows to perform a merge sort when reducing multiple aggregations together. - * For backward compatibility, we disable the merge sort and use ({@link InternalTerms#reduceLegacy} if any of - * the provided aggregations use a different {@link InternalTerms#reduceOrder}. + /* + Buckets returned by a partial reduce or a shard response are sorted by key. + That allows to perform a merge sort when reducing multiple aggregations together. + For backward compatibility, we disable the merge sort and use ({@link InternalTerms#reduceLegacy} if any of + the provided aggregations use a different {@link InternalTerms#reduceOrder}. */ BucketOrder thisReduceOrder = getReduceOrder(aggregations); if (isKeyOrder(thisReduceOrder)) { diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregator.java index 0482ef823818c..59f48bd7fbaba 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregator.java @@ -94,10 +94,10 @@ public MultiTermsAggregator( this.partiallyBuiltBucketComparator = order == null ? null : order.partiallyBuiltBucketComparator(b -> b.bucketOrd, this); // Todo, copy from TermsAggregator. need to remove duplicate code. if (subAggsNeedScore() && descendsFromNestedAggregator(parent)) { - /** - * Force the execution to depth_first because we need to access the score of - * nested documents in a sub-aggregation and we are not able to generate this score - * while replaying deferred documents. + /* + Force the execution to depth_first because we need to access the score of + nested documents in a sub-aggregation and we are not able to generate this score + while replaying deferred documents. */ this.collectMode = SubAggCollectionMode.DEPTH_FIRST; } else { diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/RareTermsAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/RareTermsAggregationBuilder.java index dc616ca7512be..5d83d926ab36f 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/RareTermsAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/RareTermsAggregationBuilder.java @@ -196,7 +196,7 @@ public double getPrecision() { * Set's the false-positive rate for individual cuckoo filters. 
Does not dictate the overall fpp rate * since we use a "scaling" cuckoo filter which adds more filters as required, and the overall * error rate grows differently than individual filters - * + *
<p>
* This value does, however, affect the overall space usage of the filter. Coarser precisions provide * more compact filters. The default is 0.01 */ diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregator.java index 7cacf1e918380..845149d894aad 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregator.java @@ -244,10 +244,10 @@ public TermsAggregator( partiallyBuiltBucketComparator = order == null ? null : order.partiallyBuiltBucketComparator(b -> b.bucketOrd, this); this.format = format; if (subAggsNeedScore() && descendsFromNestedAggregator(parent)) { - /** - * Force the execution to depth_first because we need to access the score of - * nested documents in a sub-aggregation and we are not able to generate this score - * while replaying deferred documents. + /* + Force the execution to depth_first because we need to access the score of + nested documents in a sub-aggregation and we are not able to generate this score + while replaying deferred documents. */ this.collectMode = SubAggCollectionMode.DEPTH_FIRST; } else { diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractHyperLogLog.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractHyperLogLog.java index 6b998fc86361d..902e4d69ed5fa 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractHyperLogLog.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractHyperLogLog.java @@ -34,9 +34,9 @@ /** * Hyperloglog counter, implemented based on pseudo code from - * http://static.googleusercontent.com/media/research.google.com/fr//pubs/archive/40671.pdf and its appendix - * https://docs.google.com/document/d/1gyjfMHy43U9OWBXxfaeG-3MjGzejW1dlpyMwEYAAWEI/view?fullscreen - * + * <a href="http://static.googleusercontent.com/media/research.google.com/fr//pubs/archive/40671.pdf">40671.pdf</a> and its + * <a href="https://docs.google.com/document/d/1gyjfMHy43U9OWBXxfaeG-3MjGzejW1dlpyMwEYAAWEI/view?fullscreen">appendix</a> + *
<p>
* Trying to understand what this class does without having read the paper is considered adventurous. * * @opensearch.internal */ diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractHyperLogLogPlusPlus.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractHyperLogLogPlusPlus.java index 4354a23b70f6b..e74179b403e8e 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractHyperLogLogPlusPlus.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractHyperLogLogPlusPlus.java @@ -46,7 +46,6 @@ /** * Base class for HLL++ algorithms. - * * It contains methods for cloning and serializing the data structure. * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractLinearCounting.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractLinearCounting.java index 3f5f524c9c2f5..7c00b25ae365f 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractLinearCounting.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractLinearCounting.java @@ -36,11 +36,11 @@ /** * Linear counter, implemented based on pseudo code from - * http://static.googleusercontent.com/media/research.google.com/fr//pubs/archive/40671.pdf and its appendix - * https://docs.google.com/document/d/1gyjfMHy43U9OWBXxfaeG-3MjGzejW1dlpyMwEYAAWEI/view?fullscreen - * + * <a href="http://static.googleusercontent.com/media/research.google.com/fr//pubs/archive/40671.pdf">40671.pdf</a> and its + * <a href="https://docs.google.com/document/d/1gyjfMHy43U9OWBXxfaeG-3MjGzejW1dlpyMwEYAAWEI/view?fullscreen">appendix</a> + *
<p>
                            * Trying to understand what this class does without having read the paper is considered adventurous. - * + *
<p>
                            * The algorithm just keep a record of all distinct values provided encoded as an integer. * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractPercentilesAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractPercentilesAggregationBuilder.java index 762b10fc28fa7..fa8830de5dab9 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractPercentilesAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractPercentilesAggregationBuilder.java @@ -73,7 +73,7 @@ public static > ConstructingO ParseField valuesField ) { - /** + /* * This is a non-ideal ConstructingObjectParser, because it is a compromise between Percentiles and Ranks. * Ranks requires an array of values because there is no sane default, and we want to keep that in the ctor. * Percentiles has defaults, which means the API allows the user to either use the default or configure @@ -86,6 +86,7 @@ public static > ConstructingO * out the behavior from there * * `args` are provided from the ConstructingObjectParser in-order they are defined in the parser. So: + * * - args[0]: values * - args[1]: tdigest config options * - args[2]: hdr config options @@ -197,7 +198,7 @@ public boolean keyed() { /** * Expert: set the number of significant digits in the values. Only relevant * when using {@link PercentilesMethod#HDR}. - * + *
<p>
                            * Deprecated: set numberOfSignificantValueDigits by configuring a {@link PercentilesConfig.Hdr} instead * and set via {@link PercentilesAggregationBuilder#percentilesConfig(PercentilesConfig)} */ @@ -217,7 +218,7 @@ public T numberOfSignificantValueDigits(int numberOfSignificantValueDigits) { /** * Expert: get the number of significant digits in the values. Only relevant * when using {@link PercentilesMethod#HDR}. - * + *
<p>
                            * Deprecated: get numberOfSignificantValueDigits by inspecting the {@link PercentilesConfig} returned from * {@link PercentilesAggregationBuilder#percentilesConfig()} instead */ @@ -232,7 +233,7 @@ public int numberOfSignificantValueDigits() { /** * Expert: set the compression. Higher values improve accuracy but also * memory usage. Only relevant when using {@link PercentilesMethod#TDIGEST}. - * + *
<p>
                            * Deprecated: set compression by configuring a {@link PercentilesConfig.TDigest} instead * and set via {@link PercentilesAggregationBuilder#percentilesConfig(PercentilesConfig)} */ @@ -249,7 +250,7 @@ public T compression(double compression) { /** * Expert: get the compression. Higher values improve accuracy but also * memory usage. Only relevant when using {@link PercentilesMethod#TDIGEST}. - * + *
<p>
                            * Deprecated: get compression by inspecting the {@link PercentilesConfig} returned from * {@link PercentilesAggregationBuilder#percentilesConfig()} instead */ @@ -317,15 +318,15 @@ public T percentilesConfig(PercentilesConfig percentilesConfig) { /** * Return the current algo configuration, or a default (Tdigest) otherwise - * + *
<p>
                            * This is needed because builders don't have a "build" or "finalize" method, but * the old API did bake in defaults. Certain operations like xcontent, equals, hashcode * will use the values in the builder at any time and need to be aware of defaults. - * + *
<p>
                            * But to maintain BWC behavior as much as possible, we allow the user to set * algo settings independent of method. To keep life simple we use a null to track * if any method has been selected yet. - * + *
<p>
                            * However, this means we need a way to fetch the default if the user hasn't * selected any method and uses a builder-side feature like xcontent */ diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/HyperLogLogPlusPlus.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/HyperLogLogPlusPlus.java index 7bd1e1aa22a90..7ab35eaed785c 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/HyperLogLogPlusPlus.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/HyperLogLogPlusPlus.java @@ -49,16 +49,16 @@ * Hyperloglog++ counter, implemented based on pseudo code from * http://static.googleusercontent.com/media/research.google.com/fr//pubs/archive/40671.pdf and its appendix * https://docs.google.com/document/d/1gyjfMHy43U9OWBXxfaeG-3MjGzejW1dlpyMwEYAAWEI/view?fullscreen - * + *
<p>
                            * This implementation is different from the original implementation in that it uses a hash table instead of a sorted list for linear * counting. Although this requires more space and makes hyperloglog (which is less accurate) used sooner, this is also considerably faster. - * + *
<p>
                            * Trying to understand what this class does without having read the paper is considered adventurous. - * + *
<p>
                            * The HyperLogLogPlusPlus contains two algorithms, one for linear counting and the HyperLogLog algorithm. Initially hashes added to the * data structure are processed using the linear counting until a threshold defined by the precision is reached where the data is replayed * to the HyperLogLog algorithm and then this is used. - * + *
<p>
                            * It supports storing several HyperLogLogPlusPlus structures which are identified by a bucket number. * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/HyperLogLogPlusPlusSparse.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/HyperLogLogPlusPlusSparse.java index 252df7358ddcd..558e9df93c804 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/HyperLogLogPlusPlusSparse.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/HyperLogLogPlusPlusSparse.java @@ -40,7 +40,7 @@ /** * AbstractHyperLogLogPlusPlus instance that only supports linear counting. The maximum number of hashes supported * by the structure is determined at construction time. - * + *
<p>
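As a rough standalone analogue of the hand-off described above (exact tracking first, then replay into HyperLogLog registers once a threshold is crossed); the precision, threshold, and hashing details here are illustrative, not the OpenSearch implementation:

import java.util.HashSet;
import java.util.Set;

public class TwoPhaseCardinality {
    private static final int P = 14;             // precision: 2^P registers
    private static final int M = 1 << P;
    private final byte[] registers = new byte[M];
    private final Set<Long> exact = new HashSet<>();
    private boolean usingHll = false;

    public void collect(long hash) {
        if (usingHll) {
            addToRegisters(hash);
            return;
        }
        exact.add(hash);
        if (exact.size() > M / 4) {              // threshold chosen for illustration
            for (long h : exact) {
                addToRegisters(h);               // replay the exact set into the registers
            }
            exact.clear();
            usingHll = true;
        }
    }

    private void addToRegisters(long hash) {
        int bucket = (int) (hash >>> (64 - P));  // top P bits select a register
        byte rank = (byte) (Long.numberOfLeadingZeros((hash << P) | (1L << (P - 1))) + 1);
        if (rank > registers[bucket]) {
            registers[bucket] = rank;
        }
    }

    public double cardinality() {
        if (!usingHll) {
            return exact.size();                 // still exact below the threshold
        }
        double sum = 0;
        for (byte r : registers) {
            sum += Math.pow(2, -r);
        }
        double alpha = 0.7213 / (1 + 1.079 / M);
        return alpha * M * (double) M / sum;     // raw HLL estimate, no bias correction
    }
}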
                            * This structure expects all the added values to be distinct and therefore there are no checks * if an element has been previously added. * diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/ValueCountAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/ValueCountAggregator.java index be98df384fc28..6f9be06231819 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/ValueCountAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/ValueCountAggregator.java @@ -52,7 +52,7 @@ /** * A field data based aggregator that counts the number of values a specific field has within the aggregation context. - * + *
<p>
                            * This aggregator works in a multi-bucket mode, that is, when serves as a sub-aggregator, a single aggregator instance aggregates the * counts for all buckets owned by the parent aggregator) * diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/BucketHelpers.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/BucketHelpers.java index 3015e7ce9f364..c7f2a29793bff 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/BucketHelpers.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/BucketHelpers.java @@ -64,7 +64,7 @@ public class BucketHelpers { * a date_histogram might have empty buckets due to no data existing for that time interval. * This can cause problems for operations like a derivative, which relies on a continuous * function. - * + *
<p>
                            * "insert_zeros": empty buckets will be filled with zeros for all metrics * "skip": empty buckets will simply be ignored * diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregator.java index a4c3c14f3365f..3e97eda693b91 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregator.java @@ -52,18 +52,18 @@ /** * This pipeline aggregation gives the user the ability to script functions that "move" across a window * of data, instead of single data points. It is the scripted version of MovingAvg pipeline agg. - * + *
<p>
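A tiny sketch of the two gap policies named above, applied to a series where empty buckets are represented as nulls (the types are illustrative; BucketHelpers itself resolves values out of aggregations):

import java.util.ArrayList;
import java.util.List;

static List<Double> resolveGaps(Double[] bucketValues, boolean insertZeros) {
    List<Double> out = new ArrayList<>();
    for (Double v : bucketValues) {
        if (v == null) {
            if (insertZeros) {
                out.add(0.0);   // "insert_zeros": empty buckets become 0
            }                   // "skip": empty buckets are simply ignored
        } else {
            out.add(v);
        }
    }
    return out;
}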
                            * Through custom script contexts, we expose a number of convenience methods: - * - * - max - * - min - * - sum - * - unweightedAvg - * - linearWeightedAvg - * - ewma - * - holt - * - holtWintersMovAvg - * + *
<p>
+ * <ul>
+ * <li>max</li>
+ * <li>min</li>
+ * <li>sum</li>
+ * <li>unweightedAvg</li>
+ * <li>linearWeightedAvg</li>
+ * <li>ewma</li>
+ * <li>holt</li>
+ * <li>holtWintersMovAvg</li>
+ * </ul>
                            * The user can also define any arbitrary logic via their own scripting, or combine with the above methods. * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovingFunctions.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovingFunctions.java index bac486576f537..051b9c43f63f5 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovingFunctions.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovingFunctions.java @@ -75,7 +75,7 @@ public static double sum(double[] values) { /** * Calculate a simple unweighted (arithmetic) moving average. - * + *
<p>
                            * Only finite values are averaged. NaN or null are ignored. * If all values are missing/null/NaN, the return value will be NaN. * The average is based on the count of non-null, non-NaN values. @@ -94,7 +94,7 @@ public static double unweightedAvg(double[] values) { /** * Calculate a standard deviation over the values using the provided average. - * + *
<p>
                            * Only finite values are averaged. NaN or null are ignored. * If all values are missing/null/NaN, the return value will be NaN. * The average is based on the count of non-null, non-NaN values. @@ -118,7 +118,7 @@ public static double stdDev(double[] values, double avg) { /** * Calculate a linearly weighted moving average, such that older values are * linearly less important. "Time" is determined by position in collection - * + *
<p>
                            * Only finite values are averaged. NaN or null are ignored. * If all values are missing/null/NaN, the return value will be NaN * The average is based on the count of non-null, non-NaN values. @@ -141,11 +141,11 @@ public static double linearWeightedAvg(double[] values) { /** * * Calculate a exponentially weighted moving average. - * + *
<p>
                            * Alpha controls the smoothing of the data. Alpha = 1 retains no memory of past values * (e.g. a random walk), while alpha = 0 retains infinite memory of past values (e.g. * the series mean). Useful values are somewhere in between. Defaults to 0.5. - * + *
<p>
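The recurrence that description implies, as a hedged sketch (alpha = 1 keeps only the newest value; alpha = 0 never moves off the first one):

```java
// Exponentially weighted moving average over the finite values of the window.
public static double ewmaSketch(double[] values, double alpha) {
    double avg = Double.NaN;
    for (double v : values) {
        if (Double.isNaN(v)) {
            continue; // NaN is ignored, per the contract above
        }
        avg = Double.isNaN(avg) ? v : alpha * v + (1 - alpha) * avg;
    }
    return avg;
}
```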
                            * Only finite values are averaged. NaN or null are ignored. * If all values are missing/null/NaN, the return value will be NaN * The average is based on the count of non-null, non-NaN values. @@ -171,13 +171,13 @@ public static double ewma(double[] values, double alpha) { /** * Calculate a doubly exponential weighted moving average - * + *
<p>
                            * Alpha controls the smoothing of the data. Alpha = 1 retains no memory of past values * (e.g. a random walk), while alpha = 0 retains infinite memory of past values (e.g. * the series mean). Useful values are somewhere in between. Defaults to 0.5. - * + *
<p>
                            * Beta is equivalent to alpha, but controls the smoothing of the trend instead of the data - * + *
<p>
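Holt's double-exponential recurrence, sketched under simplifying assumptions (at least two values, no NaN handling); alpha smooths the level, beta smooths the trend:

```java
public static double holtSketch(double[] values, double alpha, double beta) {
    double level = values[0];
    double trend = values[1] - values[0];
    for (int i = 1; i < values.length; i++) {
        double previousLevel = level;
        level = alpha * values[i] + (1 - alpha) * (level + trend);   // smooth the data
        trend = beta * (level - previousLevel) + (1 - beta) * trend; // smooth the trend
    }
    return level + trend; // one-step-ahead estimate
}
```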
                            * Only finite values are averaged. NaN or null are ignored. * If all values are missing/null/NaN, the return value will be NaN * The average is based on the count of non-null, non-NaN values. @@ -241,14 +241,14 @@ public static double[] holtForecast(double[] values, double alpha, double beta, /** * Calculate a triple exponential weighted moving average - * + *
<p>
                            * Alpha controls the smoothing of the data. Alpha = 1 retains no memory of past values * (e.g. a random walk), while alpha = 0 retains infinite memory of past values (e.g. * the series mean). Useful values are somewhere in between. Defaults to 0.5. - * + *
<p>
                            * Beta is equivalent to alpha, but controls the smoothing of the trend instead of the data. * Gamma is equivalent to alpha, but controls the smoothing of the seasonality instead of the data - * + *
<p>
                            * Only finite values are averaged. NaN or null are ignored. * If all values are missing/null/NaN, the return value will be NaN * The average is based on the count of non-null, non-NaN values. diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/SimulatedAnealingMinimizer.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/SimulatedAnealingMinimizer.java index a61a866228161..8427346357b0e 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/SimulatedAnealingMinimizer.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/SimulatedAnealingMinimizer.java @@ -36,11 +36,11 @@ /** * A cost minimizer which will fit a MovAvgModel to the data. - * + *
<p>
                            * This optimizer uses naive simulated annealing. Random solutions in the problem space * are generated, compared against the last period of data, and the least absolute deviation * is recorded as a cost. - * + *
<p>
                            * If the new cost is better than the old cost, the new coefficients are chosen. If the new * solution is worse, there is a temperature-dependent probability it will be randomly selected * anyway. This allows the algo to sample the problem space widely. As iterations progress, @@ -114,7 +114,7 @@ private static double acceptanceProbability(double oldCost, double newCost, doub /** * Calculates the "cost" of a model. E.g. when run on the training data, how closely do the predictions * match the test data - * + *
<p>
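Both ingredients of that loop, sketched with hypothetical names: a least-absolute-deviation cost and the temperature-dependent acceptance rule.

```java
// Least Absolute Deviation between the model's forecast and the held-out data.
static double ladCost(double[] forecast, double[] test) {
    double total = 0.0;
    for (int i = 0; i < test.length; i++) {
        total += Math.abs(forecast[i] - test[i]);
    }
    return total;
}

// Improvements are always accepted; worse solutions are accepted with a
// probability that shrinks as the temperature cools.
static boolean accept(double oldCost, double newCost, double temperature, java.util.Random random) {
    return newCost < oldCost || random.nextDouble() < Math.exp((oldCost - newCost) / temperature);
}
```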
                            * Uses Least Absolute Differences to calculate error. Note that this is not scale free, but seems * to work fairly well in practice * diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/AggregationInspectionHelper.java b/server/src/main/java/org/opensearch/search/aggregations/support/AggregationInspectionHelper.java index 25cdd76183602..a7de47bed2e6e 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/AggregationInspectionHelper.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/AggregationInspectionHelper.java @@ -78,9 +78,9 @@ * Provides a set of static helpers to determine if a particular type of InternalAggregation "has a value" * or not. This can be difficult to determine from an external perspective because each agg uses * different internal bookkeeping to determine if it is empty or not (NaN, +/-Inf, 0.0, etc). - * + *
<p>
                            * This set of helpers aim to ease that task by codifying what "empty" is for each agg. - * + *
<p>
                            * It is not entirely accurate for all aggs, since some do not expose or track the needed state * (e.g. sum doesn't record count, so it's not clear if the sum is 0.0 because it is empty * or because of summing to zero). Pipeline aggs in particular are not well supported diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/CoreValuesSourceType.java b/server/src/main/java/org/opensearch/search/aggregations/support/CoreValuesSourceType.java index 00c769c015fbc..f6d6fe28a56d3 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/CoreValuesSourceType.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/CoreValuesSourceType.java @@ -274,7 +274,7 @@ public ValuesSource replaceMissing(ValuesSource valuesSource, Object rawMissing, * MappedFieldType, it prefers to get the formatter from there. Only when a field can't be * resolved (which is to say script cases and unmapped field cases), it will fall back to calling this method on whatever * ValuesSourceType it was able to resolve to. - * + *
<p>
                            * For geoshape field we may never hit this function till we have aggregations which are only geo_shape * specific and not present on geo_points, as we use default CoreValueSource types for Geo based aggregations * as GEOPOINT diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java index e3f914ca259f6..c866238d12fcb 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java @@ -51,7 +51,7 @@ /** * Similar to {@link ValuesSourceAggregationBuilder}, except it references multiple ValuesSources (e.g. so that an aggregation * can pull values from multiple fields). - * + *
<p>
                            * A limitation of this class is that all the ValuesSource's being refereenced must be of the same type. * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregationBuilder.java index 70382369d5615..7a73fafb4a809 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregationBuilder.java @@ -264,7 +264,7 @@ protected final void doWriteTo(StreamOutput out) throws IOException { /** * DO NOT OVERRIDE THIS! - * + *
<p>
                            * This method only exists for legacy support. No new aggregations need this, nor should they override it. * * @param version For backwards compatibility, subclasses can change behavior based on the version diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceType.java b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceType.java index 86102e63297d1..9158e9f59cab2 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceType.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceType.java @@ -43,14 +43,14 @@ * {@link ValuesSourceType} represents a collection of fields that share a common set of operations, for example all numeric fields. * Aggregations declare their support for a given ValuesSourceType (via {@link ValuesSourceRegistry.Builder#register}), * and should then not need to care about the fields which use that ValuesSourceType. - * + *
<p>
                            * ValuesSourceTypes provide a set of methods to instantiate concrete {@link ValuesSource} instances, based on the actual source of the * data for the aggregations. In general, aggregations should not call these methods, but rather rely on {@link ValuesSourceConfig} to have * selected the correct implementation. - * + *
<p>
                            * ValuesSourceTypes should be stateless. We recommend that plugins define an enum for their ValuesSourceTypes, even if the plugin only * intends to define one ValuesSourceType. ValuesSourceTypes are not serialized as part of the aggregations framework. - * + *
<p>
                            * Prefer reusing an existing ValuesSourceType (ideally from {@link CoreValuesSourceType}) over creating a new type. There are some cases * where creating a new type is necessary however. In particular, consider a new ValuesSourceType if the field has custom encoding/decoding * requirements; if the field needs to expose additional information to the aggregation (e.g. {@link ValuesSource.Range#rangeType()}); or diff --git a/server/src/main/java/org/opensearch/search/backpressure/SearchBackpressureService.java b/server/src/main/java/org/opensearch/search/backpressure/SearchBackpressureService.java index 29fbee416a9b8..ebf9623eb367a 100644 --- a/server/src/main/java/org/opensearch/search/backpressure/SearchBackpressureService.java +++ b/server/src/main/java/org/opensearch/search/backpressure/SearchBackpressureService.java @@ -255,8 +255,8 @@ boolean isNodeInDuress() { return isNodeInDuress; } - /** - * Returns true if the increase in heap usage is due to search requests. + /* + Returns true if the increase in heap usage is due to search requests. */ /** diff --git a/server/src/main/java/org/opensearch/search/backpressure/settings/SearchBackpressureSettings.java b/server/src/main/java/org/opensearch/search/backpressure/settings/SearchBackpressureSettings.java index d20e3e50d419f..4c28d96d8289e 100644 --- a/server/src/main/java/org/opensearch/search/backpressure/settings/SearchBackpressureSettings.java +++ b/server/src/main/java/org/opensearch/search/backpressure/settings/SearchBackpressureSettings.java @@ -54,7 +54,7 @@ private static class Defaults { /** * Defines the percentage of tasks to cancel relative to the number of successful task completions. * In other words, it is the number of tokens added to the bucket on each successful task completion. - * + *
<p>
                            * The setting below is deprecated. * To keep backwards compatibility, the old usage is remained, and it's also used as the fallback for the new usage. */ @@ -71,7 +71,7 @@ private static class Defaults { /** * Defines the number of tasks to cancel per unit time (in millis). * In other words, it is the number of tokens added to the bucket each millisecond. - * + *
<p>
                            * The setting below is deprecated. * To keep backwards compatibility, the old usage is remained, and it's also used as the fallback for the new usage. */ @@ -86,7 +86,7 @@ private static class Defaults { /** * Defines the maximum number of tasks that can be cancelled before being rate-limited. - * + *
<p>
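Taken together, the three settings describe a token bucket. A hypothetical sketch of that model (not the actual implementation):

```java
// Token-bucket sketch: `ratio` tokens per successful completion, `rate` tokens
// per elapsed millisecond, capped at `burst`; each cancellation spends a token.
final class CancellationTokenBucket {
    private final double ratio;
    private final double rate;
    private final double burst;
    private double tokens;

    CancellationTokenBucket(double ratio, double rate, double burst) {
        this.ratio = ratio;
        this.rate = rate;
        this.burst = burst;
        this.tokens = burst;
    }

    void onTaskCompleted() {
        tokens = Math.min(burst, tokens + ratio);
    }

    void onMillisElapsed(long millis) {
        tokens = Math.min(burst, tokens + rate * millis);
    }

    boolean tryCancel() {
        if (tokens >= 1.0) {
            tokens -= 1.0;
            return true;
        }
        return false;
    }
}
```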
                            * The setting below is deprecated. * To keep backwards compatibility, the old usage is remained, and it's also used as the fallback for the new usage. */ diff --git a/server/src/main/java/org/opensearch/search/fetch/FetchSubPhase.java b/server/src/main/java/org/opensearch/search/fetch/FetchSubPhase.java index fa30b2a5c7450..f3a1d5cafe755 100644 --- a/server/src/main/java/org/opensearch/search/fetch/FetchSubPhase.java +++ b/server/src/main/java/org/opensearch/search/fetch/FetchSubPhase.java @@ -88,7 +88,7 @@ public int docId() { /** * This lookup provides access to the source for the given hit document. Note * that it should always be set to the correct doc ID and {@link LeafReaderContext}. - * + *
<p>
                            * In most cases, the hit document's source is loaded eagerly at the start of the * {@link FetchPhase}. This lookup will contain the preloaded source. */ @@ -103,7 +103,7 @@ public IndexReader topLevelReader() { /** * Returns a {@link FetchSubPhaseProcessor} for this sub phase. - * + *
<p>
                            * If nothing should be executed for the provided {@code FetchContext}, then the * implementation should return {@code null} */ diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/FetchDocValuesPhase.java b/server/src/main/java/org/opensearch/search/fetch/subphase/FetchDocValuesPhase.java index a8ab8c0dcb8a8..9b17d9dbcd8de 100644 --- a/server/src/main/java/org/opensearch/search/fetch/subphase/FetchDocValuesPhase.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/FetchDocValuesPhase.java @@ -47,7 +47,7 @@ /** * Fetch sub phase which pulls data from doc values. - * + *
<p>
                            * Specifying {@code "docvalue_fields": ["field1", "field2"]} * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/InnerHitsContext.java b/server/src/main/java/org/opensearch/search/fetch/subphase/InnerHitsContext.java index f50524244b115..cc941bb240b91 100644 --- a/server/src/main/java/org/opensearch/search/fetch/subphase/InnerHitsContext.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/InnerHitsContext.java @@ -138,7 +138,7 @@ public SearchContext parentSearchContext() { /** * The _id of the root document. - * + *
<p>
                            * Since this ID is available on the context, inner hits can avoid re-loading the root _id. */ public String getId() { @@ -151,7 +151,7 @@ public void setId(String id) { /** * A source lookup for the root document. - * + *
<p>
                            * This shared lookup allows inner hits to avoid re-loading the root _source. */ public SourceLookup getRootLookup() { diff --git a/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java b/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java index aa86ed4e56801..a520086de8051 100644 --- a/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java +++ b/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java @@ -285,7 +285,7 @@ protected void search(List leaves, Weight weight, Collector c /** * Lower-level search API. - * + *
<p>
                            * {@link LeafCollector#collect(int)} is called for every matching document in * the provided ctx. */ diff --git a/server/src/main/java/org/opensearch/search/internal/SearchContext.java b/server/src/main/java/org/opensearch/search/internal/SearchContext.java index dce6da897a74b..4ec21c469d9d3 100644 --- a/server/src/main/java/org/opensearch/search/internal/SearchContext.java +++ b/server/src/main/java/org/opensearch/search/internal/SearchContext.java @@ -286,7 +286,7 @@ public final void assignRescoreDocIds(RescoreDocIds rescoreDocIds) { /** * Indicates if the current index should perform frequent low level search cancellation check. - * + *
<p>
                            * Enabling low-level checks will make long running searches to react to the cancellation request faster. However, * since it will produce more cancellation checks it might slow the search performance down. */ diff --git a/server/src/main/java/org/opensearch/search/lookup/SourceLookup.java b/server/src/main/java/org/opensearch/search/lookup/SourceLookup.java index 812030acb9561..734f62d048a5a 100644 --- a/server/src/main/java/org/opensearch/search/lookup/SourceLookup.java +++ b/server/src/main/java/org/opensearch/search/lookup/SourceLookup.java @@ -180,7 +180,7 @@ public List extractRawValues(String path) { /** * For the provided path, return its value in the source. - * + *
<p>
                            * Note that in contrast with {@link SourceLookup#extractRawValues}, array and object values * can be returned. * diff --git a/server/src/main/java/org/opensearch/search/pipeline/PipelineConfiguration.java b/server/src/main/java/org/opensearch/search/pipeline/PipelineConfiguration.java index 9e5f2fa1592a4..bc82814e88bbd 100644 --- a/server/src/main/java/org/opensearch/search/pipeline/PipelineConfiguration.java +++ b/server/src/main/java/org/opensearch/search/pipeline/PipelineConfiguration.java @@ -31,7 +31,7 @@ /** * TODO: Copied verbatim from {@link org.opensearch.ingest.PipelineConfiguration}. - * + *
<p>
                            * See if we can refactor into a common class. I suspect not, just because this one will hold */ public class PipelineConfiguration extends AbstractDiffable implements ToXContentObject { diff --git a/server/src/main/java/org/opensearch/search/pipeline/Processor.java b/server/src/main/java/org/opensearch/search/pipeline/Processor.java index fb33f46acada4..0120d68ceb5aa 100644 --- a/server/src/main/java/org/opensearch/search/pipeline/Processor.java +++ b/server/src/main/java/org/opensearch/search/pipeline/Processor.java @@ -15,7 +15,7 @@ * Whether changes are made and what exactly is modified is up to the implementation. *
<p>
                            * Processors may get called concurrently and thus need to be thread-safe. - * + *
<p>
                            * TODO: Refactor {@link org.opensearch.ingest.Processor} to extend this interface, and specialize to IngestProcessor. * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/search/pipeline/ProcessorInfo.java b/server/src/main/java/org/opensearch/search/pipeline/ProcessorInfo.java index 0206b9b6cf716..0b80cdbef6669 100644 --- a/server/src/main/java/org/opensearch/search/pipeline/ProcessorInfo.java +++ b/server/src/main/java/org/opensearch/search/pipeline/ProcessorInfo.java @@ -18,7 +18,7 @@ /** * Information about a search pipeline processor - * + *
<p>
                            * TODO: This is copy/pasted from the ingest ProcessorInfo. * Can/should we share implementation or is this just boilerplate? * diff --git a/server/src/main/java/org/opensearch/search/profile/AbstractInternalProfileTree.java b/server/src/main/java/org/opensearch/search/profile/AbstractInternalProfileTree.java index d1f525477f25f..904b04b249b1b 100644 --- a/server/src/main/java/org/opensearch/search/profile/AbstractInternalProfileTree.java +++ b/server/src/main/java/org/opensearch/search/profile/AbstractInternalProfileTree.java @@ -70,7 +70,7 @@ public AbstractInternalProfileTree() { * Returns a {@link QueryProfileBreakdown} for a scoring query. Scoring queries (e.g. those * that are past the rewrite phase and are now being wrapped by createWeight() ) follow * a recursive progression. We can track the dependency tree by a simple stack - * + *
<p>
                            * The only hiccup is that the first scoring query will be identical to the last rewritten * query, so we need to take special care to fix that * @@ -109,7 +109,7 @@ public PB getProfileBreakdown(E query) { /** * Helper method to add a new node to the dependency tree. - * + *
<p>
                            * Initializes a new list in the dependency tree, saves the query and * generates a new {@link AbstractProfileBreakdown} to track the timings * of this element. diff --git a/server/src/main/java/org/opensearch/search/profile/query/InternalProfileCollector.java b/server/src/main/java/org/opensearch/search/profile/query/InternalProfileCollector.java index 8b860c3a58cea..024d91a8e2ed2 100644 --- a/server/src/main/java/org/opensearch/search/profile/query/InternalProfileCollector.java +++ b/server/src/main/java/org/opensearch/search/profile/query/InternalProfileCollector.java @@ -44,11 +44,12 @@ /** * This class wraps a Lucene Collector and times the execution of: - * - setScorer() - * - collect() - * - doSetNextReader() - * - needsScores() - * + *
<ul>
+ * <li>setScorer()</li>
+ * <li>collect()</li>
+ * <li>doSetNextReader()</li>
+ * <li>needsScores()</li>
+ * </ul>
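The wrap-and-time pattern in miniature (a hypothetical wrapper; the real collector also records the other calls listed above and links the collector graph):

```java
import java.io.IOException;

import org.apache.lucene.search.LeafCollector;
import org.apache.lucene.search.Scorable;

final class TimingLeafCollector implements LeafCollector {
    private final LeafCollector delegate;
    private long collectTimeNanos;

    TimingLeafCollector(LeafCollector delegate) {
        this.delegate = delegate;
    }

    @Override
    public void setScorer(Scorable scorer) throws IOException {
        delegate.setScorer(scorer); // timed the same way in the real class
    }

    @Override
    public void collect(int doc) throws IOException {
        long start = System.nanoTime();
        delegate.collect(doc);
        collectTimeNanos += System.nanoTime() - start;
    }

    long collectTimeNanos() {
        return collectTimeNanos;
    }
}
```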
                            * InternalProfiler facilitates the linking of the Collector graph * * @opensearch.internal @@ -117,7 +118,7 @@ public Collector getCollector() { /** * Creates a human-friendly representation of the Collector name. - * + *
<p>
                            * InternalBucket Collectors use the aggregation name in their toString() method, * which makes the profiled output a bit nicer. * diff --git a/server/src/main/java/org/opensearch/search/profile/query/QueryProfiler.java b/server/src/main/java/org/opensearch/search/profile/query/QueryProfiler.java index b1024c2312c50..a80ce1c658081 100644 --- a/server/src/main/java/org/opensearch/search/profile/query/QueryProfiler.java +++ b/server/src/main/java/org/opensearch/search/profile/query/QueryProfiler.java @@ -44,7 +44,7 @@ * "online" as the weights are wrapped by ContextIndexSearcher. This allows us * to know the relationship between nodes in tree without explicitly * walking the tree or pre-wrapping everything - * + *
<p>
                            * A Profiler is associated with every Search, not per Search-Request. E.g. a * request may execute two searches (query + global agg). A Profiler just * represents one of those diff --git a/server/src/main/java/org/opensearch/search/query/QuerySearchResult.java b/server/src/main/java/org/opensearch/search/query/QuerySearchResult.java index 7de605a244d09..f8a1e99ff585f 100644 --- a/server/src/main/java/org/opensearch/search/query/QuerySearchResult.java +++ b/server/src/main/java/org/opensearch/search/query/QuerySearchResult.java @@ -123,7 +123,7 @@ public static QuerySearchResult nullInstance() { * Returns true if the result doesn't contain any useful information. * It is used by the search action to avoid creating an empty response on * shard request that rewrites to match_no_docs. - * + *
<p>
                            * TODO: Currently we need the concrete aggregators to build empty responses. This means that we cannot * build an empty response in the coordinating node so we rely on this hack to ensure that at least one shard * returns a valid empty response. We should move the ability to create empty responses to aggregation builders diff --git a/server/src/main/java/org/opensearch/search/rescore/Rescorer.java b/server/src/main/java/org/opensearch/search/rescore/Rescorer.java index 33f8e5e7b535d..ae025f70c95b3 100644 --- a/server/src/main/java/org/opensearch/search/rescore/Rescorer.java +++ b/server/src/main/java/org/opensearch/search/rescore/Rescorer.java @@ -41,7 +41,7 @@ /** * A query rescorer interface used to re-rank the Top-K results of a previously * executed search. - * + *
<p>
                            * Subclasses should borrow heavily from {@link QueryRescorer} because it is * fairly well behaved and documents that tradeoffs that it is making. There * is also an {@code ExampleRescorer} that is worth looking at. diff --git a/server/src/main/java/org/opensearch/search/slice/DocValuesSliceQuery.java b/server/src/main/java/org/opensearch/search/slice/DocValuesSliceQuery.java index d7d554c058c37..856e103193463 100644 --- a/server/src/main/java/org/opensearch/search/slice/DocValuesSliceQuery.java +++ b/server/src/main/java/org/opensearch/search/slice/DocValuesSliceQuery.java @@ -49,7 +49,7 @@ /** * A {@link SliceQuery} that uses the numeric doc values of a field to do the slicing. - * + *
<p>
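The membership test both slice queries reduce to, sketched (Math.floorMod stands in for whatever positive-hash handling the implementation uses):

```java
// A document belongs to slice `id` when the hash of its slicing key, taken
// modulo the total number of slices `max`, equals `id`.
static boolean matchesSlice(long slicingKey, int id, int max) {
    return Math.floorMod(Long.hashCode(slicingKey), max) == id;
}
```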
                            * NOTE: With deterministic field values this query can be used across different readers safely. * If updates are accepted on the field you must ensure that the same reader is used for all `slice` queries. * diff --git a/server/src/main/java/org/opensearch/search/slice/TermsSliceQuery.java b/server/src/main/java/org/opensearch/search/slice/TermsSliceQuery.java index 297020fe2fe4d..05f36b0d6f3cf 100644 --- a/server/src/main/java/org/opensearch/search/slice/TermsSliceQuery.java +++ b/server/src/main/java/org/opensearch/search/slice/TermsSliceQuery.java @@ -53,7 +53,7 @@ /** * A {@link SliceQuery} that uses the terms dictionary of a field to do the slicing. - * + *
<p>
                            * NOTE: The cost of this filter is O(N*M) where N is the number of unique terms in the dictionary * and M is the average number of documents per term. * For each segment this filter enumerates the terms dictionary, computes the hash code for each term and fills diff --git a/server/src/main/java/org/opensearch/search/suggest/Suggest.java b/server/src/main/java/org/opensearch/search/suggest/Suggest.java index 17e5b44ec89e7..abd73b45ecc13 100644 --- a/server/src/main/java/org/opensearch/search/suggest/Suggest.java +++ b/server/src/main/java/org/opensearch/search/suggest/Suggest.java @@ -263,7 +263,7 @@ public void addTerm(T entry) { /** * Returns a integer representing the type of the suggestion. This is used for * internal serialization over the network. - * + *
<p>
                            * This class is now serialized as a NamedWriteable and this method only remains for backwards compatibility */ @Deprecated diff --git a/server/src/main/java/org/opensearch/search/suggest/completion/CompletionSuggestion.java b/server/src/main/java/org/opensearch/search/suggest/completion/CompletionSuggestion.java index 96e47cf7c8000..e3e6cad65be62 100644 --- a/server/src/main/java/org/opensearch/search/suggest/completion/CompletionSuggestion.java +++ b/server/src/main/java/org/opensearch/search/suggest/completion/CompletionSuggestion.java @@ -63,14 +63,14 @@ /** * Suggestion response for {@link CompletionSuggester} results - * + *
<p>
                            * Response format for each entry: * { * "text" : STRING * "score" : FLOAT * "contexts" : CONTEXTS * } - * + *
<p>
                            * CONTEXTS : { * "CONTEXT_NAME" : ARRAY, * .. diff --git a/server/src/main/java/org/opensearch/search/suggest/completion/FuzzyOptions.java b/server/src/main/java/org/opensearch/search/suggest/completion/FuzzyOptions.java index 56cc8fbfbcf66..139742f84b80b 100644 --- a/server/src/main/java/org/opensearch/search/suggest/completion/FuzzyOptions.java +++ b/server/src/main/java/org/opensearch/search/suggest/completion/FuzzyOptions.java @@ -142,7 +142,7 @@ public int getEditDistance() { /** * Returns if transpositions option is set - * + *
<p>
                            * if transpositions is set, then swapping one character for another counts as one edit instead of two. */ public boolean isTranspositions() { diff --git a/server/src/main/java/org/opensearch/search/suggest/completion/TopSuggestGroupDocsCollector.java b/server/src/main/java/org/opensearch/search/suggest/completion/TopSuggestGroupDocsCollector.java index 4fbd661037aa9..fbc39536502de 100644 --- a/server/src/main/java/org/opensearch/search/suggest/completion/TopSuggestGroupDocsCollector.java +++ b/server/src/main/java/org/opensearch/search/suggest/completion/TopSuggestGroupDocsCollector.java @@ -43,7 +43,7 @@ /** * * Extension of the {@link TopSuggestDocsCollector} that returns top documents from the completion suggester. - * + *
<p>
                            * This collector groups suggestions coming from the same document but matching different contexts * or surface form together. When different contexts or surface forms match the same suggestion form only * the best one per document (sorted by weight) is kept. @@ -55,7 +55,7 @@ class TopSuggestGroupDocsCollector extends TopSuggestDocsCollector { /** * Sole constructor - * + *
<p>
                            * Collects at most num completions * with corresponding document and weight */ diff --git a/server/src/main/java/org/opensearch/search/suggest/completion/context/ContextMapping.java b/server/src/main/java/org/opensearch/search/suggest/completion/context/ContextMapping.java index 61e60293f4943..94707ff2b4569 100644 --- a/server/src/main/java/org/opensearch/search/suggest/completion/context/ContextMapping.java +++ b/server/src/main/java/org/opensearch/search/suggest/completion/context/ContextMapping.java @@ -54,7 +54,7 @@ /** * A {@link ContextMapping} defines criteria that can be used to * filter and/or boost suggestions at query time for {@link CompletionFieldMapper}. - * + *
<p>
                            * Implementations have to define how contexts are parsed at query/index time * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/search/suggest/completion/context/ContextMappings.java b/server/src/main/java/org/opensearch/search/suggest/completion/context/ContextMappings.java index 0f5781fefcf07..cff5a901a473f 100644 --- a/server/src/main/java/org/opensearch/search/suggest/completion/context/ContextMappings.java +++ b/server/src/main/java/org/opensearch/search/suggest/completion/context/ContextMappings.java @@ -124,11 +124,11 @@ public Iterator> iterator() { * Field prepends context values with a suggestion * Context values are associated with a type, denoted by * a type id, which is prepended to the context value. - * + *
<p>
                            * Every defined context mapping yields a unique type id (index of the * corresponding context mapping in the context mappings list) * for all its context values - * + *
<p>
                            * The type, context and suggestion values are encoded as follows: *
<p>
                            * TYPE_ID | CONTEXT_VALUE | CONTEXT_SEP | SUGGESTION_VALUE @@ -209,7 +209,7 @@ public ContextQuery toContextQuery(CompletionQuery query, Map * see {@link org.opensearch.search.suggest.completion.context.ContextMappings.TypedContextField} * @return a map of context names and their values * @@ -232,7 +232,7 @@ public Map> getNamedContexts(List contexts) { /** * Loads {@link ContextMappings} from configuration - * + *
<p>
                            * Expected configuration: * List of maps representing {@link ContextMapping} * [{"name": .., "type": .., ..}, {..}] @@ -286,7 +286,7 @@ private static String extractRequiredValue(Map contextConfig, St /** * Writes a list of objects specified by the defined {@link ContextMapping}s - * + *
<p>
                            * see {@link ContextMapping#toXContent(XContentBuilder, Params)} */ @Override diff --git a/server/src/main/java/org/opensearch/search/suggest/completion/context/GeoContextMapping.java b/server/src/main/java/org/opensearch/search/suggest/completion/context/GeoContextMapping.java index 17f858b0b1302..0e29e928df760 100644 --- a/server/src/main/java/org/opensearch/search/suggest/completion/context/GeoContextMapping.java +++ b/server/src/main/java/org/opensearch/search/suggest/completion/context/GeoContextMapping.java @@ -71,7 +71,7 @@ * The suggestions can be boosted and/or filtered depending on * whether it falls within an area, represented by a query geo hash * with a specified precision - * + *
<p>
                            * {@link GeoQueryContext} defines the options for constructing * a unit of query context for this context type * diff --git a/server/src/main/java/org/opensearch/search/suggest/phrase/DirectCandidateGenerator.java b/server/src/main/java/org/opensearch/search/suggest/phrase/DirectCandidateGenerator.java index 6335629f61cf1..1a00cb9465771 100644 --- a/server/src/main/java/org/opensearch/search/suggest/phrase/DirectCandidateGenerator.java +++ b/server/src/main/java/org/opensearch/search/suggest/phrase/DirectCandidateGenerator.java @@ -147,9 +147,9 @@ public TermStats internalTermStats(BytesRef term) throws IOException { if (termsEnum.seekExact(term)) { return new TermStats( termsEnum.docFreq(), - /** - * We use the {@link TermsEnum#docFreq()} for fields that don't - * record the {@link TermsEnum#totalTermFreq()}. + /* + We use the {@link TermsEnum#docFreq()} for fields that don't + record the {@link TermsEnum#totalTermFreq()}. */ termsEnum.totalTermFreq() == -1 ? termsEnum.docFreq() : termsEnum.totalTermFreq() ); @@ -168,10 +168,10 @@ public CandidateSet drawCandidates(CandidateSet set) throws IOException { float origThreshold = spellchecker.getThresholdFrequency(); try { if (suggestMode != SuggestMode.SUGGEST_ALWAYS) { - /** - * We use the {@link TermStats#docFreq} to compute the frequency threshold - * because that's what {@link DirectSpellChecker#suggestSimilar} expects - * when filtering terms. + /* + We use the {@link TermStats#docFreq} to compute the frequency threshold + because that's what {@link DirectSpellChecker#suggestSimilar} expects + when filtering terms. */ int threshold = thresholdTermFrequency(original.termStats.docFreq); if (threshold == Integer.MAX_VALUE) { diff --git a/server/src/main/java/org/opensearch/search/suggest/phrase/LinearInterpolation.java b/server/src/main/java/org/opensearch/search/suggest/phrase/LinearInterpolation.java index f1dba9793ba9e..bc942da738c7e 100644 --- a/server/src/main/java/org/opensearch/search/suggest/phrase/LinearInterpolation.java +++ b/server/src/main/java/org/opensearch/search/suggest/phrase/LinearInterpolation.java @@ -70,7 +70,7 @@ public final class LinearInterpolation extends SmoothingModel { /** * Creates a linear interpolation smoothing model. - * + *
<p>
                            * Note: the lambdas must sum up to one. * * @param trigramLambda diff --git a/server/src/main/java/org/opensearch/search/suggest/phrase/PhraseSuggestionBuilder.java b/server/src/main/java/org/opensearch/search/suggest/phrase/PhraseSuggestionBuilder.java index e37d964cc0424..a6bfb880cf249 100644 --- a/server/src/main/java/org/opensearch/search/suggest/phrase/PhraseSuggestionBuilder.java +++ b/server/src/main/java/org/opensearch/search/suggest/phrase/PhraseSuggestionBuilder.java @@ -222,7 +222,7 @@ public Integer gramSize() { * misspellings in order to form a correction. This method accepts a float * value in the range [0..1) as a fraction of the actual query terms a * number {@code >=1} as an absolute number of query terms. - * + *
<p>
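That dual interpretation, restated as a hypothetical helper:

```java
// Fractions in [0..1) scale with the number of query terms; values >= 1 are an
// absolute cap on misspelled terms.
static int resolveMaxErrors(float maxErrors, int numQueryTerms) {
    return maxErrors < 1.0f ? (int) Math.floor(maxErrors * numQueryTerms) : (int) maxErrors;
}
```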
                            * The default is set to {@code 1.0} which corresponds to that only * corrections with at most 1 misspelled term are returned. */ diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java index 1f063bfed8cfe..71918bc73b55a 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java @@ -1565,7 +1565,7 @@ private void runNextQueuedOperation(RepositoryData repositoryData, String reposi /** * Runs a cluster state update that checks whether we have outstanding snapshot deletions that can be executed and executes them. - * + *
<p>
                            * TODO: optimize this to execute in a single CS update together with finalizing the latest snapshot */ private void runReadyDeletions(RepositoryData repositoryData, String repository) { @@ -2758,7 +2758,7 @@ public boolean assertAllListenersResolved() { * Every shard snapshot or clone state update can result in multiple snapshots being updated. In order to determine whether or not a * shard update has an effect we use an outer loop over all current executing snapshot operations that iterates over them in the order * they were started in and an inner loop over the list of shard update tasks. - * + *
<p>
                            * If the inner loop finds that a shard update task applies to a given snapshot and either a shard-snapshot or shard-clone operation in * it then it will update the state of the snapshot entry accordingly. If that update was a noop, then the task is removed from the * iteration as it was already applied before and likely just arrived on the cluster-manager node again due to retries upstream. @@ -2768,7 +2768,7 @@ public boolean assertAllListenersResolved() { * a task in the executed tasks collection applied to a shard it was waiting for to become available, then the shard snapshot operation * will be started for that snapshot entry and the task removed from the collection of tasks that need to be applied to snapshot * entries since it can not have any further effects. - * + *
<p>
                            * Package private to allow for tests. */ static final ClusterStateTaskExecutor SHARD_STATE_EXECUTOR = new ClusterStateTaskExecutor() { @@ -3058,7 +3058,7 @@ private static ShardSnapshotStatus startShardSnapshotAfterClone(ClusterState cur /** * An update to the snapshot state of a shard. - * + *
<p>
                            * Package private for testing */ static final class ShardSnapshotUpdate { diff --git a/server/src/main/java/org/opensearch/tasks/Task.java b/server/src/main/java/org/opensearch/tasks/Task.java index dcd7fc581a8fd..ee2eae60eae8f 100644 --- a/server/src/main/java/org/opensearch/tasks/Task.java +++ b/server/src/main/java/org/opensearch/tasks/Task.java @@ -550,11 +550,11 @@ public int incrementResourceTrackingThreads() { * This method is called when threads finish execution, and also when the task is unregistered (to mark the task's * own thread as complete). When the active thread count becomes zero, the onTaskResourceTrackingCompleted method * is called exactly once on all registered listeners. - * + *
<p>
                            * Since a task is unregistered after the message is processed, it implies that the threads responsible to produce * the response must have started prior to it (i.e. startThreadResourceTracking called before unregister). * This ensures that the number of active threads doesn't drop to zero pre-maturely. - * + *
<p>
                            * Rarely, some threads may even start execution after the task is unregistered. As resource stats are piggy-backed * with the response, any thread usage info captured after the task is unregistered may be irrelevant. * diff --git a/server/src/main/java/org/opensearch/tasks/TaskCancellation.java b/server/src/main/java/org/opensearch/tasks/TaskCancellation.java index 9b61b2454b53c..2d152e513f197 100644 --- a/server/src/main/java/org/opensearch/tasks/TaskCancellation.java +++ b/server/src/main/java/org/opensearch/tasks/TaskCancellation.java @@ -18,7 +18,7 @@ /** * TaskCancellation represents a task eligible for cancellation. * It doesn't guarantee that the task will actually get cancelled or not; that decision is left to the caller. - * + *
<p>
                            * It contains a list of cancellation reasons along with callbacks that are invoked when cancel() is called. * * @opensearch.internal @@ -87,7 +87,7 @@ private void runOnCancelCallbacks() { /** * Returns the sum of all cancellation scores. - * + *
<p>
                            * A zero score indicates no reason to cancel the task. * A task with a higher score suggests greater possibility of recovering the node when it is cancelled. */ diff --git a/server/src/main/java/org/opensearch/threadpool/Scheduler.java b/server/src/main/java/org/opensearch/threadpool/Scheduler.java index 86c322ec89dd7..4a7c63a1b559a 100644 --- a/server/src/main/java/org/opensearch/threadpool/Scheduler.java +++ b/server/src/main/java/org/opensearch/threadpool/Scheduler.java @@ -60,7 +60,7 @@ public interface Scheduler { /** * Create a scheduler that can be used client side. Server side, please use ThreadPool.schedule instead. - * + *
<p>
                            * Notice that if any scheduled jobs fail with an exception, these will bubble up to the uncaught exception handler where they will * be logged as a warning. This includes jobs started using execute, submit and schedule. * @param settings the settings to use @@ -178,7 +178,7 @@ interface ScheduledCancellable extends Delayed, Cancellable {} * This class encapsulates the scheduling of a {@link Runnable} that needs to be repeated on a interval. For example, checking a value * for cleanup every second could be done by passing in a Runnable that can perform the check and the specified interval between * executions of this runnable. NOTE: the runnable is only rescheduled to run again after completion of the runnable. - * + *
<p>
                            * For this class, completion means that the call to {@link Runnable#run()} returned or an exception was thrown and caught. In * case of an exception, this class will log the exception and reschedule the runnable for its next execution. This differs from the * {@link ScheduledThreadPoolExecutor#scheduleWithFixedDelay(Runnable, long, long, TimeUnit)} semantics as an exception there would diff --git a/server/src/main/java/org/opensearch/threadpool/TaskAwareRunnable.java b/server/src/main/java/org/opensearch/threadpool/TaskAwareRunnable.java index 183b9b2f4cf9a..90f50f78d84ad 100644 --- a/server/src/main/java/org/opensearch/threadpool/TaskAwareRunnable.java +++ b/server/src/main/java/org/opensearch/threadpool/TaskAwareRunnable.java @@ -25,7 +25,7 @@ /** * Responsible for wrapping the original task's runnable and sending updates on when it starts and finishes to * entities listening to the events. - * + *
<p>
                            * It's able to associate runnable with a task with the help of task Id available in thread context. */ public class TaskAwareRunnable extends AbstractRunnable implements WrappedRunnable { diff --git a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java index ecb5b2cef58ac..fab7620292dd2 100644 --- a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java +++ b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java @@ -320,7 +320,7 @@ public ThreadPool( /** * Returns a value of milliseconds that may be used for relative time calculations. - * + *
<p>
                            * This method should only be used for calculating time deltas. For an epoch based * timestamp, see {@link #absoluteTimeInMillis()}. */ @@ -330,7 +330,7 @@ public long relativeTimeInMillis() { /** * Returns a value of nanoseconds that may be used for relative time calculations. - * + *
<p>
                            * This method should only be used for calculating time deltas. For an epoch based * timestamp, see {@link #absoluteTimeInMillis()}. */ @@ -343,7 +343,7 @@ public long relativeTimeInNanos() { * that require the highest precision possible. Performance critical code must use * either {@link #relativeTimeInNanos()} or {@link #relativeTimeInMillis()} which * give better performance at the cost of lower precision. - * + *
<p>
                            * This method should only be used for calculating time deltas. For an epoch based * timestamp, see {@link #absoluteTimeInMillis()}. */ @@ -353,7 +353,7 @@ public long preciseRelativeTimeInNanos() { /** * Returns the value of milliseconds since UNIX epoch. - * + *
<p>
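Usage as these javadocs prescribe, sketched: the relative clock for durations, the absolute clock only for epoch timestamps.

```java
// `runTask` is any work to time; only the difference of two relative readings
// is meaningful, while absoluteTimeInMillis() supplies a loggable timestamp.
static long timeTaskMillis(org.opensearch.threadpool.ThreadPool threadPool, Runnable runTask) {
    long start = threadPool.relativeTimeInMillis();
    runTask.run();
    return threadPool.relativeTimeInMillis() - start;
}
```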
                            * This method should only be used for exact date/time formatting. For calculating * time deltas that should not suffer from negative deltas, which are possible with * this method, see {@link #relativeTimeInMillis()}. @@ -649,7 +649,7 @@ public String toString() { /** * A thread to cache millisecond time values from * {@link System#nanoTime()} and {@link System#currentTimeMillis()}. - * + *
<p>
                            * The values are updated at a specified interval. */ static class CachedTimeThread extends Thread { diff --git a/server/src/main/java/org/opensearch/transport/CompressibleBytesOutputStream.java b/server/src/main/java/org/opensearch/transport/CompressibleBytesOutputStream.java index 5cb169439a14d..57707d3b44477 100644 --- a/server/src/main/java/org/opensearch/transport/CompressibleBytesOutputStream.java +++ b/server/src/main/java/org/opensearch/transport/CompressibleBytesOutputStream.java @@ -48,11 +48,11 @@ * requires that the underlying {@link DeflaterOutputStream} be closed to write EOS bytes. However, the * {@link BytesStream} should not be closed yet, as we have not used the bytes. This class handles these * intricacies. - * + *
<p>
                            * {@link CompressibleBytesOutputStream#materializeBytes()} should be called when all the bytes have been * written to this stream. If compression is enabled, the proper EOS bytes will be written at that point. * The underlying {@link BytesReference} will be returned. - * + *
<p>
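The call sequence those two rules imply, sketched (the constructor shape is assumed from the description; `payload`, `bytesStream` and `shouldCompress` are placeholders):

```java
CompressibleBytesOutputStream stream = new CompressibleBytesOutputStream(bytesStream, shouldCompress);
payload.writeTo(stream);                               // write all the bytes first
BytesReference reference = stream.materializeBytes();  // EOS bytes land here if compressing
stream.close();                                        // does NOT close bytesStream
// ... use `reference`, then close bytesStream separately.
```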
                            * {@link CompressibleBytesOutputStream#close()} will NOT close the underlying stream. The byte stream passed * in the constructor must be closed individually. * diff --git a/server/src/main/java/org/opensearch/transport/RemoteClusterConnection.java b/server/src/main/java/org/opensearch/transport/RemoteClusterConnection.java index 1599c4cb75517..8a5f6dfffb036 100644 --- a/server/src/main/java/org/opensearch/transport/RemoteClusterConnection.java +++ b/server/src/main/java/org/opensearch/transport/RemoteClusterConnection.java @@ -54,10 +54,10 @@ * current node is part of the cluster and it won't receive cluster state updates from the remote cluster. Remote clusters are also not * fully connected with the current node. From a connection perspective a local cluster forms a bi-directional star network while in the * remote case we only connect to a subset of the nodes in the cluster in an uni-directional fashion. - * + *
<p>
                            * This class also handles the discovery of nodes from the remote cluster. The initial list of seed nodes is only used to discover all nodes * in the remote cluster and connects to all eligible nodes, for details see {@link RemoteClusterService#REMOTE_NODE_ATTRIBUTE}. - * + *
<p>
                            * In the case of a disconnection, this class will issue a re-connect task to establish at most * {@link SniffConnectionStrategy#REMOTE_CONNECTIONS_PER_CLUSTER} until either all eligible nodes are exhausted or the maximum number of * connections per cluster has been reached. @@ -123,7 +123,7 @@ void ensureConnected(ActionListener listener) { /** * Collects all nodes on the connected cluster and returns / passes a nodeID to {@link DiscoveryNode} lookup function * that returns null if the node ID is not found. - * + *
<p>
                            * The requests to get cluster state on the connected cluster are made in the system context because logically * they are equivalent to checking a single detail in the local cluster state and should not require that the * user who made the request that is using this method in its implementation is authorized to view the entire diff --git a/server/src/main/java/org/opensearch/transport/TransportService.java b/server/src/main/java/org/opensearch/transport/TransportService.java index 32bedb52a9cef..de88c3619abe8 100644 --- a/server/src/main/java/org/opensearch/transport/TransportService.java +++ b/server/src/main/java/org/opensearch/transport/TransportService.java @@ -170,12 +170,12 @@ public void close() {} }; static { - /** - * Registers server specific types as a streamables for serialization - * over the {@link StreamOutput} and {@link StreamInput} wire + /* + Registers server specific types as a streamables for serialization + over the {@link StreamOutput} and {@link StreamInput} wire */ Streamables.registerStreamables(); - /** Registers OpenSearch server specific exceptions (exceptions outside of core library) */ + /* Registers OpenSearch server specific exceptions (exceptions outside of core library) */ OpenSearchServerException.registerExceptions(); } diff --git a/server/src/main/java/org/opensearch/watcher/FileWatcher.java b/server/src/main/java/org/opensearch/watcher/FileWatcher.java index 82f95d6a1622c..d773e3b5d7c9e 100644 --- a/server/src/main/java/org/opensearch/watcher/FileWatcher.java +++ b/server/src/main/java/org/opensearch/watcher/FileWatcher.java @@ -44,7 +44,7 @@ /** * File resources watcher - * + *
<p>
                            * The file watcher checks directory and all its subdirectories for file changes and notifies its listeners accordingly * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/watcher/ResourceWatcherService.java b/server/src/main/java/org/opensearch/watcher/ResourceWatcherService.java index 9b9c00cd4252f..62002f9c6c323 100644 --- a/server/src/main/java/org/opensearch/watcher/ResourceWatcherService.java +++ b/server/src/main/java/org/opensearch/watcher/ResourceWatcherService.java @@ -49,7 +49,7 @@ /** * Generic resource watcher service - * + *
<p>
                            * Other opensearch services can register their resource watchers with this service using {@link #add(ResourceWatcher)} * method. This service will call {@link org.opensearch.watcher.ResourceWatcher#checkAndNotify()} method of all * registered watcher periodically. The frequency of checks can be specified using {@code resource.reload.interval} setting, which diff --git a/server/src/test/java/org/opensearch/OpenSearchExceptionTests.java b/server/src/test/java/org/opensearch/OpenSearchExceptionTests.java index 7546a2d7c898e..d83539a2fef61 100644 --- a/server/src/test/java/org/opensearch/OpenSearchExceptionTests.java +++ b/server/src/test/java/org/opensearch/OpenSearchExceptionTests.java @@ -976,7 +976,7 @@ public void testFailureToAndFromXContentWithDetails() throws IOException { /** * Builds a {@link ToXContent} using a JSON XContentBuilder and compares the result to the given json in string format. - * + *
<p>
                            * By default, the stack trace of the exception is not rendered. The parameter `errorTrace` forces the stack trace to * be rendered like the REST API does when the "error_trace" parameter is set to true. */ diff --git a/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java b/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java index 2643aa5b6db01..6cbe458a35ef8 100644 --- a/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java +++ b/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java @@ -339,8 +339,8 @@ public void onFailure(Exception e) { createListener.onFailure(new Exception("Exception occurred in phase 1")); latch.await(); assertEquals(0, updateNodesInvoked.size()); - /** - * cleanup is not called on create pit phase one failure + /* + cleanup is not called on create pit phase one failure */ assertEquals(0, deleteNodesInvoked.size()); } @@ -438,8 +438,8 @@ public void onFailure(Exception e) { createListener.onResponse(searchResponse); latch.await(); assertEquals(3, updateNodesInvoked.size()); - /** - * check if cleanup is called for all nodes in case of update pit failure + /* + check if cleanup is called for all nodes in case of update pit failure */ assertEquals(3, deleteNodesInvoked.size()); } @@ -526,8 +526,8 @@ public void onFailure(Exception e) { createListener.onResponse(searchResponse); latch.await(); assertEquals(3, updateNodesInvoked.size()); - /** - * check if cleanup is called for all nodes in case of update pit failure + /* + check if cleanup is called for all nodes in case of update pit failure */ assertEquals(3, deleteNodesInvoked.size()); } diff --git a/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationActionTests.java b/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationActionTests.java index 03150d6e30755..dad0fa0efd3ec 100644 --- a/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationActionTests.java +++ b/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationActionTests.java @@ -153,7 +153,7 @@ public class TransportReplicationActionTests extends OpenSearchTestCase { /** * takes a request that was sent by a {@link TransportReplicationAction} and captured * and returns the underlying request if it's wrapped or the original (cast to the expected type). - * + *
<p>
                            * This will throw a {@link ClassCastException} if the request is of the wrong type. */ public static R resolveRequest(TransportRequest requestOrWrappedRequest) { diff --git a/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationAllPermitsAcquisitionTests.java b/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationAllPermitsAcquisitionTests.java index 839ac7ab5bb92..cce8758ef1014 100644 --- a/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationAllPermitsAcquisitionTests.java +++ b/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationAllPermitsAcquisitionTests.java @@ -114,7 +114,7 @@ * This test tests the concurrent execution of several transport replication actions. All of these actions (except one) acquire a single * permit during their execution on shards and are expected to fail if a global level or index level block is present in the cluster state. * These actions are all started at the same time, but some are delayed until one last action. - * + *
<p>
                            * This last action is special because it acquires all the permits on shards, adds the block to the cluster state and then "releases" the * previously delayed single permit actions. This way, there is a clear transition between the single permit actions executed before the * all permit action that sets the block and those executed afterwards that are doomed to fail because of the block. diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationPriorityTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationPriorityTests.java index f9f181402da1b..8cd664c8c13fc 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationPriorityTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationPriorityTests.java @@ -50,7 +50,7 @@ public class AllocationPriorityTests extends OpenSearchAllocationTestCase { /** * Tests that higher prioritized primaries and replicas are allocated first even on the balanced shard allocator - * See https://github.com/elastic/elasticsearch/issues/13249 for details + * See elasticsearch issue #13249 for details */ public void testPrioritizedIndicesAllocatedFirst() { AllocationService allocation = createAllocationService( diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/BalanceConfigurationTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/BalanceConfigurationTests.java index ba1e2d27a3bc5..62dce9c4edeb5 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/BalanceConfigurationTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/BalanceConfigurationTests.java @@ -247,13 +247,13 @@ public void testPrimaryBalanceWithPreferPrimaryBalanceSetting() { /** * This test verifies the allocation logic when nodes breach multiple constraints and ensure node breaching min * constraints chosen for allocation. - * + *
<p>
                            * This test mimics a cluster state containing four nodes, where one node breaches two constraints while one breaches * only one. In order to have nodes breach constraints, test excludes two nodes (node2, node3) from allocation so * that other two nodes (node0, node1) have all shards assignments resulting in constraints breach. Test asserts that * the new primary shard assignment lands on the node breaching one constraint(node1), while replica land on the other * (node0). Final shard allocation state. - * + *
<p>
                            routing_nodes: -----node_id[node2][V] -----node_id[node3][V] @@ -384,13 +384,13 @@ public void testGlobalPrimaryBalance() throws Exception { * This test mimics a cluster state which can not be rebalanced due to * {@link org.opensearch.cluster.routing.allocation.decider.SameShardAllocationDecider} * allocation decider which prevents shard relocation, leaving cluster unbalanced on primaries. - * + *
<p>
                            * There are two nodes (N1, N2) where all primaries land on N1 while replicas on N2. * N1 N2 * ------ -------- * P1 R1 * P2 R2 - * + *
<p>
                            * -----node_id[node_0][V] * --------[test][1], node[node_0], [P], s[STARTED], a[id=xqfZSToVSQaff2xvuxh_yA] * --------[test][0], node[node_0], [P], s[STARTED], a[id=VGjOeBGdSmu3pJR6T7v29A] @@ -454,14 +454,14 @@ public void testPrimaryBalance_NotSolved_1() { * This test mimics cluster state where re-balancing is not possible due to existing limitation of re-balancing * logic which applies at index level i.e. balance shards single index across all nodes. This will be solved when * primary shard count across indices, constraint is added. - * + *
<p>
                            * Please note, P1, P2 belongs to different index - * + *
<p>
                            * N1 N2 * ------ -------- * P1 R1 * P2 R2 - * + *
<p>
                            * -----node_id[node_0][V] * --------[test1][0], node[node_0], [P], s[STARTED], a[id=u7qtyy5AR42hgEa-JpeArg] * --------[test0][0], node[node_0], [P], s[STARTED], a[id=BQrLSo6sQyGlcLdVvGgqLQ] diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/CatAllocationTestCase.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/CatAllocationTestCase.java index 10271cad33fec..8f90882c21804 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/CatAllocationTestCase.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/CatAllocationTestCase.java @@ -66,7 +66,7 @@ * A base testcase that allows to run tests based on the output of the CAT API * The input is a line based cat/shards output like: * kibana-int 0 p STARTED 2 24.8kb 10.202.245.2 r5-9-35 - * + *
<p>
                            * the test builds up a clusterstate from the cat input and optionally runs a full balance on it. * This can be used to debug cluster allocation decisions. */ diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/IndexShardConstraintDeciderOverlapTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/IndexShardConstraintDeciderOverlapTests.java index cf32d2b3cf00f..7f2f048485318 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/IndexShardConstraintDeciderOverlapTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/IndexShardConstraintDeciderOverlapTests.java @@ -30,7 +30,7 @@ public class IndexShardConstraintDeciderOverlapTests extends OpenSearchAllocatio /** * High watermark breach blocks new shard allocations to affected nodes. If shard count on such * nodes is low, this will cause IndexShardPerNodeConstraint to breach. - * + *
<p>
                            * This test verifies that this doesn't lead to unassigned shards, and there are no hot spots in eligible * nodes. */ diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/IndexShardHotSpotTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/IndexShardHotSpotTests.java index 2efbb256e36bc..617c9b4701722 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/IndexShardHotSpotTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/IndexShardHotSpotTests.java @@ -58,7 +58,7 @@ public void testUnderReplicatedClusterScaleOut() { /** * Test cluster scale in scenario, when nodes are gracefully excluded from * cluster before termination. - * + *
<p>
                            * During moveShards(), shards are picked from across indexes in an interleaved manner. * This prevents hot spots by evenly picking up shards. Since shard order can change * in subsequent runs. diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsRebalanceShardsTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsRebalanceShardsTests.java index ef9ae90e18bb5..e1c0a7eff1f6e 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsRebalanceShardsTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsRebalanceShardsTests.java @@ -21,7 +21,7 @@ public class RemoteShardsRebalanceShardsTests extends RemoteShardsBalancerBaseTe /** * Test remote shard allocation and balancing for standard new cluster setup. - * + *
<p>
                            * Post rebalance primaries should be balanced across all the nodes. */ public void testShardAllocationAndRebalance() { @@ -72,7 +72,7 @@ private int getTotalShardCountAcrossNodes(final Map nodePrimari /** * Asserts that the expected value is within the variance range. - * + *
<p>
                            * Being used to assert the average number of shards per node. * Variance is required in case of non-absolute mean values; * for example, total number of remote capable nodes in a cluster. diff --git a/server/src/test/java/org/opensearch/common/RoundingTests.java b/server/src/test/java/org/opensearch/common/RoundingTests.java index 0ebfe02dc7641..1a499bac3e2e8 100644 --- a/server/src/test/java/org/opensearch/common/RoundingTests.java +++ b/server/src/test/java/org/opensearch/common/RoundingTests.java @@ -239,7 +239,7 @@ public void testOffsetRounding() { * {@link DateTimeUnit} and {@link ZoneId} and often (50% of the time) * chooses test dates that are exactly on or close to offset changes (e.g. * DST) in the chosen time zone. - * + *
<p>
                            * It rounds the test date down and up and performs various checks on the * rounding unit interval that is defined by this. Assumptions tested are * described in diff --git a/server/src/test/java/org/opensearch/common/geo/GeoJsonShapeParserTests.java b/server/src/test/java/org/opensearch/common/geo/GeoJsonShapeParserTests.java index 25140d1ef73d5..e16dd859ec14c 100644 --- a/server/src/test/java/org/opensearch/common/geo/GeoJsonShapeParserTests.java +++ b/server/src/test/java/org/opensearch/common/geo/GeoJsonShapeParserTests.java @@ -1169,9 +1169,9 @@ public void testParseOGCPolygonWithHoles() throws IOException, ParseException { } public void testParseInvalidPolygon() throws IOException { - /** - * The following 3 test cases ensure proper error handling of invalid polygons - * per the GeoJSON specification + /* + The following 3 test cases ensure proper error handling of invalid polygons + per the GeoJSON specification */ // test case 1: create an invalid polygon with only 2 points String invalidPoly = XContentFactory.jsonBuilder() diff --git a/server/src/test/java/org/opensearch/common/geo/GeometryIndexerTests.java b/server/src/test/java/org/opensearch/common/geo/GeometryIndexerTests.java index 297b7d9f65d5f..7fc95c2316aef 100644 --- a/server/src/test/java/org/opensearch/common/geo/GeometryIndexerTests.java +++ b/server/src/test/java/org/opensearch/common/geo/GeometryIndexerTests.java @@ -225,7 +225,7 @@ public static MultiPoint remove180s(MultiPoint points) { /** * A randomized test that generates a random lines crossing anti-merdian and checks that the decomposed segments of this line * have the same total length (measured using Euclidean distances between neighboring points) as the original line. - * + *
<p>
                            * It also extracts all points from these lines, performs normalization of these points and then compares that the resulting * points of line normalization match the points of points normalization with the exception of points that were created on the * antimeridian as the result of line decomposition. diff --git a/server/src/test/java/org/opensearch/common/joda/JavaJodaTimeDuellingTests.java b/server/src/test/java/org/opensearch/common/joda/JavaJodaTimeDuellingTests.java index 02bc6c58ad233..e8ddfde11f4cc 100644 --- a/server/src/test/java/org/opensearch/common/joda/JavaJodaTimeDuellingTests.java +++ b/server/src/test/java/org/opensearch/common/joda/JavaJodaTimeDuellingTests.java @@ -61,8 +61,8 @@ protected boolean enableWarningsCheck() { } public void testTimezoneParsing() { - /** this testcase won't work in joda. See comment in {@link #testPartialTimeParsing()} - * assertSameDateAs("2016-11-30T+01", "strict_date_optional_time", "strict_date_optional_time"); + /* this testcase won't work in joda. See comment in {@link #testPartialTimeParsing()} + assertSameDateAs("2016-11-30T+01", "strict_date_optional_time", "strict_date_optional_time"); */ assertSameDateAs("2016-11-30T00+01", "strict_date_optional_time", "strict_date_optional_time"); assertSameDateAs("2016-11-30T00+0100", "strict_date_optional_time", "strict_date_optional_time"); diff --git a/server/src/test/java/org/opensearch/common/rounding/TimeZoneRoundingTests.java b/server/src/test/java/org/opensearch/common/rounding/TimeZoneRoundingTests.java index 8297f8fcf47e2..d1b3adcd55f0c 100644 --- a/server/src/test/java/org/opensearch/common/rounding/TimeZoneRoundingTests.java +++ b/server/src/test/java/org/opensearch/common/rounding/TimeZoneRoundingTests.java @@ -212,7 +212,7 @@ public void testTimeUnitRoundingDST() { * {@link DateTimeUnit} and {@link DateTimeZone} and often (50% of the time) * chooses test dates that are exactly on or close to offset changes (e.g. * DST) in the chosen time zone. - * + *
<p>
                            * It rounds the test date down and up and performs various checks on the * rounding unit interval that is defined by this. Assumptions tested are * described in diff --git a/server/src/test/java/org/opensearch/index/mapper/NestedObjectMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/NestedObjectMapperTests.java index f3f682697a930..9a0d34c916f5c 100644 --- a/server/src/test/java/org/opensearch/index/mapper/NestedObjectMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/NestedObjectMapperTests.java @@ -1097,8 +1097,8 @@ public void testLimitNestedDocsMultipleNestedFields() throws Exception { @Override protected boolean forbidPrivateIndexSettings() { - /** - * This is needed to force the index version with {@link IndexMetadata.SETTING_INDEX_VERSION_CREATED}. + /* + This is needed to force the index version with {@link IndexMetadata.SETTING_INDEX_VERSION_CREATED}. */ return false; } diff --git a/server/src/test/java/org/opensearch/index/query/InnerHitBuilderTests.java b/server/src/test/java/org/opensearch/index/query/InnerHitBuilderTests.java index 729205c9775b4..7c7598e5dc3c4 100644 --- a/server/src/test/java/org/opensearch/index/query/InnerHitBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/InnerHitBuilderTests.java @@ -109,7 +109,7 @@ public void testSerialization() throws Exception { /** * Test that if we serialize and deserialize an object, further * serialization leads to identical bytes representation. - * + *
<p>
                            * This is necessary to ensure because we use the serialized BytesReference * of this builder as part of the cacheKey in * {@link ShardSearchRequest} (via diff --git a/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java b/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java index 4bbb03322b1f8..2661873d9498f 100644 --- a/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java +++ b/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java @@ -321,9 +321,9 @@ public void testCreatePitMoreThanMaxOpenPitContexts() throws Exception { // deleteall DeletePitRequest deletePITRequest = new DeletePitRequest(pitIds.toArray(new String[0])); - /** - * When we invoke delete again, returns success after clearing the remaining readers. Asserting reader context - * not found exceptions don't result in failures ( as deletion in one node is successful ) + /* + When we invoke delete again, returns success after clearing the remaining readers. Asserting reader context + not found exceptions don't result in failures ( as deletion in one node is successful ) */ ActionFuture execute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); DeletePitResponse deletePITResponse = execute.get(); @@ -489,8 +489,8 @@ public void testPitAfterUpdateIndex() throws Exception { client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get().getHits().getTotalHits().value, Matchers.equalTo(50L) ); - /** - * assert without point in time + /* + assert without point in time */ assertThat( @@ -509,8 +509,8 @@ public void testPitAfterUpdateIndex() throws Exception { client().prepareSearch().setSize(0).setQuery(termQuery("message", "update")).get().getHits().getTotalHits().value, Matchers.equalTo(50L) ); - /** - * using point in time id will have the same search results as ones before update + /* + using point in time id will have the same search results as ones before update */ assertThat( client().prepareSearch() diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/DateScriptMocksPlugin.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/DateScriptMocksPlugin.java index 5a9a9e2b6cb51..d6ba4eedd3a19 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/DateScriptMocksPlugin.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/DateScriptMocksPlugin.java @@ -43,7 +43,7 @@ /** * Mock scripts shared by DateRangeIT and DateHistogramIT. - * + *
<p>
                            * Provides {@link DateScriptMocksPlugin#EXTRACT_FIELD}, {@link DateScriptMocksPlugin#DOUBLE_PLUS_ONE_MONTH}, * and {@link DateScriptMocksPlugin#LONG_PLUS_ONE_MONTH} scripts. */ diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/LegacyIntervalCompositeAggBuilderTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/LegacyIntervalCompositeAggBuilderTests.java index aa51f9b11ea19..39054f15826f0 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/LegacyIntervalCompositeAggBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/LegacyIntervalCompositeAggBuilderTests.java @@ -44,7 +44,7 @@ /** * Duplicates the tests from {@link CompositeAggregationBuilderTests}, except using the deprecated * interval on date histo. Separated to make testing the warnings easier. - * + *
<p>
                            * Can be removed in when the legacy interval options are gone */ public class LegacyIntervalCompositeAggBuilderTests extends BaseAggregationTestCase { diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/MetricAggScriptPlugin.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/MetricAggScriptPlugin.java index e29eb95a52cd7..b4dbc8017bc3b 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/MetricAggScriptPlugin.java +++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/MetricAggScriptPlugin.java @@ -45,7 +45,7 @@ /** * Provides a number of dummy scripts for tests. - * + *
<p>
                            * Each script provided allows for an {@code inc} parameter which will * be added to each value read from a document. */ diff --git a/server/src/test/java/org/opensearch/search/aggregations/pipeline/AvgBucketAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/pipeline/AvgBucketAggregatorTests.java index 4b8d0ef83dd1b..de213a154c3c5 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/pipeline/AvgBucketAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/pipeline/AvgBucketAggregatorTests.java @@ -80,15 +80,15 @@ public class AvgBucketAggregatorTests extends AggregatorTestCase { /** * Test for issue #30608. Under the following circumstances: - * + *
<p>
                            * A. Multi-bucket agg in the first entry of our internal list * B. Regular agg as the immediate child of the multi-bucket in A * C. Regular agg with the same name as B at the top level, listed as the second entry in our internal list * D. Finally, a pipeline agg with the path down to B - * + *
<p>
                            * BucketMetrics reduction would throw a class cast exception due to bad subpathing. This test ensures * it is fixed. - * + *
<p>
                            * Note: we have this test inside of the `avg_bucket` package so that we can get access to the package-private * `reduce()` needed for testing this */ diff --git a/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineInfoTests.java b/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineInfoTests.java index c19c1ebcb5c26..ce7344fa0d2d6 100644 --- a/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineInfoTests.java +++ b/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineInfoTests.java @@ -46,7 +46,7 @@ public void testSerializationRoundtrip() throws IOException { /** * When serializing / deserializing to / from old versions, processor type info is lost. - * + *
<p>
                            * Also, we only supported request/response processors. */ public void testSerializationRoundtripBackcompat() throws IOException { diff --git a/test/framework/src/main/java/org/opensearch/action/support/ActionTestUtils.java b/test/framework/src/main/java/org/opensearch/action/support/ActionTestUtils.java index 00fd5a5bcf815..52496c331c25e 100644 --- a/test/framework/src/main/java/org/opensearch/action/support/ActionTestUtils.java +++ b/test/framework/src/main/java/org/opensearch/action/support/ActionTestUtils.java @@ -55,7 +55,7 @@ public static R /** * Executes the given action. - * + *
<p>
                            * This is a shim method to make execution publicly available in tests. */ public static void execute( diff --git a/test/framework/src/main/java/org/opensearch/cli/CommandTestCase.java b/test/framework/src/main/java/org/opensearch/cli/CommandTestCase.java index 4ee1314d27fe1..5566b493adc7d 100644 --- a/test/framework/src/main/java/org/opensearch/cli/CommandTestCase.java +++ b/test/framework/src/main/java/org/opensearch/cli/CommandTestCase.java @@ -54,7 +54,7 @@ public void resetTerminal() { /** * Runs a command with the given args. - * + *
<p>
                            * Output can be found in {@link #terminal}. */ public String execute(String... args) throws Exception { diff --git a/test/framework/src/main/java/org/opensearch/cluster/OpenSearchAllocationWithConstraintsTestCase.java b/test/framework/src/main/java/org/opensearch/cluster/OpenSearchAllocationWithConstraintsTestCase.java index 7f1d1d3381751..0c08de252e4cd 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/OpenSearchAllocationWithConstraintsTestCase.java +++ b/test/framework/src/main/java/org/opensearch/cluster/OpenSearchAllocationWithConstraintsTestCase.java @@ -209,28 +209,28 @@ public int allocateAndCheckIndexShardHotSpots(boolean expected, int nodes, Strin continue; } - /** - * Hot spots can occur due to the order in which shards get allocated to nodes. - * A node with fewer shards may not be able to accept current shard due to - * SameShardAllocationDecider, causing it to breach allocation constraint on - * another node. We need to differentiate between such hot spots v/s actual hot - * spots. - * - * A simple check could be to ensure there is no node with shards less than - * allocation limit, that can accept current shard. However, in current - * allocation algorithm, when nodes get throttled, shards are added to - * ModelNodes without adding them to actual cluster (RoutingNodes). As a result, - * the shards per node we see here, are different from the ones observed by - * weight function in balancer. RoutingNodes with {@link count} < {@link limit} - * may not have had the same count in the corresponding ModelNode seen by weight - * function. We hence use the following alternate check -- - * - * Given the way {@link limit} is defined, we should not have hot spots if *all* - * nodes are eligible to accept the shard. A hot spot is acceptable, if either - * all peer nodes have {@link count} > {@link limit}, or if even one node is - * ineligible to accept the shard due to SameShardAllocationDecider, as this - * leads to a chain of events that breach IndexShardsPerNode constraint on all - * other nodes. + /* + Hot spots can occur due to the order in which shards get allocated to nodes. + A node with fewer shards may not be able to accept current shard due to + SameShardAllocationDecider, causing it to breach allocation constraint on + another node. We need to differentiate between such hot spots v/s actual hot + spots. + + A simple check could be to ensure there is no node with shards less than + allocation limit, that can accept current shard. However, in current + allocation algorithm, when nodes get throttled, shards are added to + ModelNodes without adding them to actual cluster (RoutingNodes). As a result, + the shards per node we see here, are different from the ones observed by + weight function in balancer. RoutingNodes with {@link count} < {@link limit} + may not have had the same count in the corresponding ModelNode seen by weight + function. We hence use the following alternate check -- + + Given the way {@link limit} is defined, we should not have hot spots if *all* + nodes are eligible to accept the shard. A hot spot is acceptable, if either + all peer nodes have {@link count} > {@link limit}, or if even one node is + ineligible to accept the shard due to SameShardAllocationDecider, as this + leads to a chain of events that breach IndexShardsPerNode constraint on all + other nodes. 
*/ // If all peer nodes have count >= limit, hotspot is acceptable diff --git a/test/framework/src/main/java/org/opensearch/cluster/coordination/LinearizabilityChecker.java b/test/framework/src/main/java/org/opensearch/cluster/coordination/LinearizabilityChecker.java index 60aacb83e0dc1..946b980bfb62f 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/coordination/LinearizabilityChecker.java +++ b/test/framework/src/main/java/org/opensearch/cluster/coordination/LinearizabilityChecker.java @@ -476,22 +476,22 @@ void unlift() { /** * A cache optimized for small bit-counts (less than 64) and small number of unique permutations of state objects. - * + *
<p>
                            * Each combination of states is kept once only, building on the * assumption that the number of permutations is small compared to the * number of bits permutations. For those histories that are difficult to check * we will have many bits combinations that use the same state permutations. - * + *
<p>
                            * The smallMap optimization allows us to avoid object overheads for bit-sets up to 64 bit large. - * + *
<p>
                            * Comparing set of (bits, state) to smallMap: * (bits, state) : 24 (tuple) + 24 (FixedBitSet) + 24 (bits) + 5 (hash buckets) + 24 (hashmap node). * smallMap bits to {state} : 10 (bits) + 5 (hash buckets) + avg-size of unique permutations. - * + *
<p>
                            * The avg-size of the unique permutations part is very small compared to the * sometimes large number of bits combinations (which are the cases where * we run into trouble). - * + *
<p>
                            * set of (bits, state) totals 101 bytes compared to smallMap bits to { state } * which totals 15 bytes, ie. a 6x improvement in memory usage. */ diff --git a/test/framework/src/main/java/org/opensearch/common/logging/JsonLogsIntegTestCase.java b/test/framework/src/main/java/org/opensearch/common/logging/JsonLogsIntegTestCase.java index e4774030f21cb..5f4d92d65548b 100644 --- a/test/framework/src/main/java/org/opensearch/common/logging/JsonLogsIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/common/logging/JsonLogsIntegTestCase.java @@ -52,11 +52,11 @@ * Fields available upon process startup: type, timestamp, level, component, * message, node.name, cluster.name. * Whereas node.id and cluster.uuid are available later once the first clusterState has been received. - * + *
<p>
                            * * node.name, cluster.name, node.id, cluster.uuid * should not change across all log lines - * + *
<p>
                            * Note that this won't pass for nodes in clusters that don't have the node name defined in opensearch.yml and start * with DEBUG or TRACE level logging. Those nodes log a few lines before the node.name is set by LogConfigurator.setNodeName. */ diff --git a/test/framework/src/main/java/org/opensearch/index/MockEngineFactoryPlugin.java b/test/framework/src/main/java/org/opensearch/index/MockEngineFactoryPlugin.java index 72d34676850b9..0a47db4c740d6 100644 --- a/test/framework/src/main/java/org/opensearch/index/MockEngineFactoryPlugin.java +++ b/test/framework/src/main/java/org/opensearch/index/MockEngineFactoryPlugin.java @@ -46,7 +46,7 @@ /** * A plugin to use {@link MockEngineFactory}. - * + *
<p>
                            * Subclasses may override the reader wrapper used. */ public class MockEngineFactoryPlugin extends Plugin implements EnginePlugin { diff --git a/test/framework/src/main/java/org/opensearch/index/RandomCreateIndexGenerator.java b/test/framework/src/main/java/org/opensearch/index/RandomCreateIndexGenerator.java index f016d9450425d..5ab77783b2bac 100644 --- a/test/framework/src/main/java/org/opensearch/index/RandomCreateIndexGenerator.java +++ b/test/framework/src/main/java/org/opensearch/index/RandomCreateIndexGenerator.java @@ -56,7 +56,7 @@ private RandomCreateIndexGenerator() {} /** * Returns a random {@link CreateIndexRequest}. - * + *
<p>
                            * Randomizes the index name, the aliases, mappings and settings associated with the * index. If present, the mapping definition will be nested under a type name. */ diff --git a/test/framework/src/main/java/org/opensearch/index/mapper/FieldMapperTestCase.java b/test/framework/src/main/java/org/opensearch/index/mapper/FieldMapperTestCase.java index 83bb2f1ee7d65..77137073aa30f 100644 --- a/test/framework/src/main/java/org/opensearch/index/mapper/FieldMapperTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/mapper/FieldMapperTestCase.java @@ -149,7 +149,7 @@ protected Set unsupportedProperties() { /** * Add type-specific modifiers for consistency checking. - * + *
<p>
                            * This should be called in a {@code @Before} method */ protected void addModifier(String property, boolean updateable, BiConsumer method) { @@ -158,7 +158,7 @@ protected void addModifier(String property, boolean updateable, BiConsumer /** * Add type-specific modifiers for consistency checking. - * + *
<p>
                            * This should be called in a {@code @Before} method */ protected void addBooleanModifier(String property, boolean updateable, BiConsumer method) { diff --git a/test/framework/src/main/java/org/opensearch/index/mapper/FieldMapperTestCase2.java b/test/framework/src/main/java/org/opensearch/index/mapper/FieldMapperTestCase2.java index 1987a49431bf9..5dfb2f16a1aae 100644 --- a/test/framework/src/main/java/org/opensearch/index/mapper/FieldMapperTestCase2.java +++ b/test/framework/src/main/java/org/opensearch/index/mapper/FieldMapperTestCase2.java @@ -139,7 +139,7 @@ protected Set unsupportedProperties() { /** * Add type-specific modifiers for consistency checking. - * + *
<p>
                            * This should be called in a {@code @Before} method */ protected void addModifier(String property, boolean updateable, BiConsumer method) { @@ -148,7 +148,7 @@ protected void addModifier(String property, boolean updateable, BiConsumer /** * Add type-specific modifiers for consistency checking. - * + *
<p>
                            * This should be called in a {@code @Before} method */ protected void addBooleanModifier(String property, boolean updateable, BiConsumer method) { diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index ffa37817e8a03..9dc230474482f 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -1086,7 +1086,7 @@ protected void recoverReplica( /** * Recovers a replica from the give primary, allow the user to supply a custom recovery target. A typical usage of a custom recovery * target is to assert things in the various stages of recovery. - * + *
<p>
                            * Note: this method keeps the shard in {@link IndexShardState#POST_RECOVERY} and doesn't start it. * * @param replica the recovery target shard diff --git a/test/framework/src/main/java/org/opensearch/mockito/plugin/PriviledgedMockMaker.java b/test/framework/src/main/java/org/opensearch/mockito/plugin/PriviledgedMockMaker.java index ef13e065968df..0f5e043ee1135 100644 --- a/test/framework/src/main/java/org/opensearch/mockito/plugin/PriviledgedMockMaker.java +++ b/test/framework/src/main/java/org/opensearch/mockito/plugin/PriviledgedMockMaker.java @@ -43,7 +43,7 @@ public class PriviledgedMockMaker implements MockMaker { * since Mockito does not support SecurityManager out of the box. The method has to be called by * test framework before the SecurityManager is being set, otherwise additional permissions have * to be granted to the caller: - * + *
<p>
                            * permission java.security.Permission "createAccessControlContext" * */ diff --git a/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchMockAPIBasedRepositoryIntegTestCase.java b/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchMockAPIBasedRepositoryIntegTestCase.java index bf020b1bc292d..dff9b997d87db 100644 --- a/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchMockAPIBasedRepositoryIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchMockAPIBasedRepositoryIntegTestCase.java @@ -265,7 +265,7 @@ protected static void drainInputStream(final InputStream inputStream) throws IOE /** * HTTP handler that injects random service errors - * + *
<p>
                            * Note: it is not a good idea to allow this handler to simulate too many errors as it would * slow down the test suite. */ @@ -339,7 +339,7 @@ public interface DelegatingHttpHandler extends HttpHandler { /** * HTTP handler that allows collect request stats per request type. - * + *
<p>
                            * Implementors should keep track of the desired requests on {@link #maybeTrack(String, Headers)}. */ @SuppressForbidden(reason = "this test uses a HttpServer to emulate a cloud-based storage service") @@ -377,7 +377,7 @@ public void handle(HttpExchange exchange) throws IOException { /** * Tracks the given request if it matches the criteria. - * + *
<p>
                            * The request is represented as: * Request = Method SP Request-URI * diff --git a/test/framework/src/main/java/org/opensearch/script/MockScriptEngine.java b/test/framework/src/main/java/org/opensearch/script/MockScriptEngine.java index cb0614ddeb808..83b245a1bcecb 100644 --- a/test/framework/src/main/java/org/opensearch/script/MockScriptEngine.java +++ b/test/framework/src/main/java/org/opensearch/script/MockScriptEngine.java @@ -58,14 +58,14 @@ /** * A mocked script engine that can be used for testing purpose. - * + *
<p>
                            * This script engine allows to define a set of predefined scripts that basically a combination of a key and a * function: - * + *
<p>
                            * The key can be anything as long as it is a {@link String} and is used to resolve the scripts * at compilation time. For inline scripts, the key can be a description of the script. For stored and file scripts, * the source must match a key in the predefined set of scripts. - * + *
<p>
                            * The function is used to provide the result of the script execution and can return anything. */ public class MockScriptEngine implements ScriptEngine { diff --git a/test/framework/src/main/java/org/opensearch/script/ScoreAccessor.java b/test/framework/src/main/java/org/opensearch/script/ScoreAccessor.java index 26b439fa6438f..9cf2141555957 100644 --- a/test/framework/src/main/java/org/opensearch/script/ScoreAccessor.java +++ b/test/framework/src/main/java/org/opensearch/script/ScoreAccessor.java @@ -39,7 +39,7 @@ /** * A float encapsulation that dynamically accesses the score of a document. - * + *
<p>
                            * The provided {@link DocLookup} is used to retrieve the score * for the current document. */ diff --git a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java index 5c649f1dc832d..82f15a590bea6 100644 --- a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java @@ -685,7 +685,7 @@ protected static IndexReader maybeWrapReaderEs(DirectoryReader reader) throws IO * Implementors should return a list of {@link ValuesSourceType} that the aggregator supports. * This is used to test the matrix of supported/unsupported field types against the aggregator * and verify it works (or doesn't) as expected. - * + *
<p>
                            * If this method is implemented, {@link AggregatorTestCase#createAggBuilderForTypeTest(MappedFieldType, String)} * should be implemented as well. * @@ -702,7 +702,7 @@ protected List getSupportedValuesSourceTypes() { * The field type and name are provided, and the implementor is expected to return an AggBuilder accordingly. * The AggBuilder should be returned even if the aggregation does not support the field type, because * the test will check if an exception is thrown in that case. - * + *
<p>
                            * The list of supported types are provided by {@link AggregatorTestCase#getSupportedValuesSourceTypes()}, * which must also be implemented. * @@ -720,7 +720,7 @@ protected AggregationBuilder createAggBuilderForTypeTest(MappedFieldType fieldTy * A method that allows implementors to specifically denylist particular field types (based on their content_name). * This is needed in some areas where the ValuesSourceType is not granular enough, for example integer values * vs floating points, or `keyword` bytes vs `binary` bytes (which are not searchable) - * + *
<p>
                            * This is a denylist instead of an allowlist because there are vastly more field types than ValuesSourceTypes, * and it's expected that these unsupported cases are exceptional rather than common */ @@ -734,7 +734,7 @@ protected List unsupportedMappedFieldTypes() { * is provided by the implementor class, and it is executed against each field type in turn. If * an exception is thrown when the field is supported, that will fail the test. Similarly, if * an exception _is not_ thrown when a field is unsupported, that will also fail the test. - * + *
<p>
                            * Exception types/messages are not currently checked, just presence/absence of an exception. */ public void testSupportedFieldTypes() throws IOException { @@ -825,7 +825,7 @@ private ValuesSourceType fieldToVST(MappedFieldType fieldType) { /** * Helper method to write a single document with a single value specific to the requested fieldType. - * + *
<p>
                            * Throws an exception if it encounters an unknown field type, to prevent new ones from sneaking in without * being tested. */ diff --git a/test/framework/src/main/java/org/opensearch/test/AbstractDiffableSerializationTestCase.java b/test/framework/src/main/java/org/opensearch/test/AbstractDiffableSerializationTestCase.java index f5d358a162bd1..10e782e6af8da 100644 --- a/test/framework/src/main/java/org/opensearch/test/AbstractDiffableSerializationTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/AbstractDiffableSerializationTestCase.java @@ -41,8 +41,8 @@ /** * An abstract test case to ensure correct behavior of Diffable. - * - * This class can be used as a based class for tests of Metadata.Custom classes and other classes that support, + *
<p>
                            + * This class can be used as a based class for tests of Metadata.Custom classes and other classes that support {@link org.opensearch.core.common.io.stream.Writeable } serialization, XContent-based serialization and is diffable. * Writable serialization, XContent-based serialization and is diffable. */ public abstract class AbstractDiffableSerializationTestCase & ToXContent> extends AbstractSerializingTestCase { diff --git a/test/framework/src/main/java/org/opensearch/test/AbstractDiffableWireSerializationTestCase.java b/test/framework/src/main/java/org/opensearch/test/AbstractDiffableWireSerializationTestCase.java index ff7c39cd8f102..3f97f0704d733 100644 --- a/test/framework/src/main/java/org/opensearch/test/AbstractDiffableWireSerializationTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/AbstractDiffableWireSerializationTestCase.java @@ -40,8 +40,8 @@ /** * An abstract test case to ensure correct behavior of Diffable. - * - * This class can be used as a based class for tests of ClusterState.Custom classes and other classes that support, + *
<p>
                            + * This class can be used as a based class for tests of ClusterState.Custom classes and other classes that support {@link org.opensearch.core.common.io.stream.Writeable } serialization and is diffable. * Writable serialization and is diffable. */ public abstract class AbstractDiffableWireSerializationTestCase> extends AbstractWireSerializingTestCase { diff --git a/test/framework/src/main/java/org/opensearch/test/AbstractQueryTestCase.java b/test/framework/src/main/java/org/opensearch/test/AbstractQueryTestCase.java index 47fe85d28975f..afd93e1b72fbb 100644 --- a/test/framework/src/main/java/org/opensearch/test/AbstractQueryTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/AbstractQueryTestCase.java @@ -213,7 +213,7 @@ public void testUnknownObjectException() throws IOException { /** * Traverses the json tree of the valid query provided as argument and mutates it one or more times by adding one object within each * object encountered. - * + *
<p>
                            * For instance given the following valid term query: * { * "term" : { @@ -222,7 +222,7 @@ public void testUnknownObjectException() throws IOException { * } * } * } - * + *
<p>
                            * The following two mutations will be generated, and an exception is expected when trying to parse them: * { * "term" : { @@ -233,7 +233,7 @@ public void testUnknownObjectException() throws IOException { * } * } * } - * + *
<p>
                            * { * "term" : { * "field" : { @@ -243,7 +243,7 @@ public void testUnknownObjectException() throws IOException { * } * } * } - * + *
<p>
                            * Every mutation is then added to the list of results with a boolean flag indicating if a parsing exception is expected or not * for the mutation. Some specific objects do not cause any exception as they can hold arbitrary content; they are passed using the * arbitraryMarkers parameter. @@ -768,7 +768,7 @@ protected static String randomMinimumShouldMatch() { /** * Call this method to check a valid json string representing the query under test against * it's generated json. - * + *
<p>
                            * Note: By the time of this writing (Nov 2015) all queries are taken from the query dsl * reference docs mirroring examples there. Here's how the queries were generated: * diff --git a/test/framework/src/main/java/org/opensearch/test/AbstractWireTestCase.java b/test/framework/src/main/java/org/opensearch/test/AbstractWireTestCase.java index 9a4363dc4d946..64c5d916d55d2 100644 --- a/test/framework/src/main/java/org/opensearch/test/AbstractWireTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/AbstractWireTestCase.java @@ -124,9 +124,9 @@ protected final T copyInstance(T instance) throws IOException { /** * Get the {@link NamedWriteableRegistry} to use when de-serializing the object. - * + *
<p>
                            * Override this method if you need to register {@link NamedWriteable}s for the test object to de-serialize. - * + *
<p>
                            * By default this will return a {@link NamedWriteableRegistry} with no registered {@link NamedWriteable}s */ protected NamedWriteableRegistry getNamedWriteableRegistry() { diff --git a/test/framework/src/main/java/org/opensearch/test/MockKeywordPlugin.java b/test/framework/src/main/java/org/opensearch/test/MockKeywordPlugin.java index c27f3f169fbae..3d829d77dd323 100644 --- a/test/framework/src/main/java/org/opensearch/test/MockKeywordPlugin.java +++ b/test/framework/src/main/java/org/opensearch/test/MockKeywordPlugin.java @@ -44,7 +44,7 @@ /** * Some tests rely on the keyword tokenizer, but this tokenizer isn't part of lucene-core and therefor not available * in some modules. What this test plugin does, is use the mock tokenizer and advertise that as the keyword tokenizer. - * + *
<p>
                            * Most tests that need this test plugin use normalizers. When normalizers are constructed they try to resolve the * keyword tokenizer, but if the keyword tokenizer isn't available then constructing normalizers will fail. */ diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index 0b80c6e577f95..da829a3bc5225 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -1343,7 +1343,7 @@ protected void ensureStableCluster(int nodeCount, TimeValue timeValue, boolean l /** * Ensures that all nodes in the cluster are connected to each other. - * + *
<p>
                            * Some network disruptions may leave nodes that are not the cluster-manager disconnected from each other. * {@link org.opensearch.cluster.NodeConnectionsService} will eventually reconnect but it's * handy to be able to ensure this happens faster @@ -2311,11 +2311,11 @@ public static void afterClass() throws Exception { private static void initializeSuiteScope() throws Exception { Class targetClass = getTestClass(); - /** - * Note we create these test class instance via reflection - * since JUnit creates a new instance per test and that is also - * the reason why INSTANCE is static since this entire method - * must be executed in a static context. + /* + Note we create these test class instance via reflection + since JUnit creates a new instance per test and that is also + the reason why INSTANCE is static since this entire method + must be executed in a static context. */ assert INSTANCE == null; if (isSuiteScopedTest(targetClass)) { diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchTokenStreamTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchTokenStreamTestCase.java index e853c1e6314e1..9f7f33b351be3 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchTokenStreamTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchTokenStreamTestCase.java @@ -48,9 +48,9 @@ @TimeoutSuite(millis = TimeUnits.HOUR) @LuceneTestCase.SuppressReproduceLine @LuceneTestCase.SuppressSysoutChecks(bugUrl = "we log a lot on purpose") -/** - * Basic test case for token streams. the assertion methods in this class will - * run basic checks to enforce correct behavior of the token streams. +/* + Basic test case for token streams. the assertion methods in this class will + run basic checks to enforce correct behavior of the token streams. */ public abstract class OpenSearchTokenStreamTestCase extends BaseTokenStreamTestCase { diff --git a/test/framework/src/main/java/org/opensearch/test/TestCluster.java b/test/framework/src/main/java/org/opensearch/test/TestCluster.java index a9d9abf69ee41..8c41e6e5d5b38 100644 --- a/test/framework/src/main/java/org/opensearch/test/TestCluster.java +++ b/test/framework/src/main/java/org/opensearch/test/TestCluster.java @@ -276,7 +276,7 @@ public void wipeRepositories(String... repositories) { /** * Ensures that any breaker statistics are reset to 0. - * + *
<p>
                            * The implementation is specific to the test cluster, because the act of * checking some breaker stats can increase them. */ diff --git a/test/framework/src/main/java/org/opensearch/test/XContentTestUtils.java b/test/framework/src/main/java/org/opensearch/test/XContentTestUtils.java index 0cb39a29e8159..2c535211ca456 100644 --- a/test/framework/src/main/java/org/opensearch/test/XContentTestUtils.java +++ b/test/framework/src/main/java/org/opensearch/test/XContentTestUtils.java @@ -164,10 +164,10 @@ private static String differenceBetweenObjectsIgnoringArrayOrder(String path, Ob * This method takes the input xContent data and adds a random field value, inner object or array into each * json object. This can e.g. be used to test if parsers that handle the resulting xContent can handle the * augmented xContent correctly, for example when testing lenient parsing. - * + *
<p>
                            * If the xContent output contains objects that should be skipped of such treatment, an optional filtering * {@link Predicate} can be supplied that checks xContent paths that should be excluded from this treatment. - * + *
<p>
                            * This predicate should check the xContent path that we want to insert to and return {@code true} if the * path should be excluded. Paths are string concatenating field names and array indices, so e.g. in: * @@ -188,7 +188,7 @@ private static String differenceBetweenObjectsIgnoringArrayOrder(String path, Ob * * * "foo1.bar.2.baz" would point to the desired insert location. - * + *
<p>
                            * To exclude inserting into the "foo1" object we would user a {@link Predicate} like *
<pre>
                                  * {@code
                            @@ -257,12 +257,12 @@ public static BytesReference insertRandomFields(
                                  * This utility method takes an XContentParser and walks the xContent structure to find all
                                  * possible paths to where a new object or array starts. This can be used in tests that add random
                                  * xContent values to test parsing code for errors or to check their robustness against new fields.
                            -     *
+     * <p>
                            * The path uses dot separated fieldnames and numbers for array indices, similar to what we do in * {@link ObjectPath}. - * + *
<p>
                            * The {@link Stack} passed in should initially be empty, it gets pushed to by recursive calls - * + *
<p>
                            * As an example, the following json xContent: *
<pre>
                                  *     {
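As an aside on the XContentTestUtils javadoc above: the exclusion predicate it describes can be sketched in a few lines. This is a minimal illustration only; the class name and the "foo1" paths are hypothetical examples, not part of the framework or of this patch.

import java.util.function.Predicate;

public class ExcludePathDemo {
    public static void main(String[] args) {
        // Exclude the hypothetical "foo1" object and everything under it from
        // random field insertion; paths are dot-separated field names and
        // array indices, e.g. "foo1.bar.2.baz".
        Predicate<String> excludeFoo1 = path -> path.equals("foo1") || path.startsWith("foo1.");
        System.out.println(excludeFoo1.test("foo1.bar.2.baz")); // true  -> excluded from inserts
        System.out.println(excludeFoo1.test("foo2.bar"));       // false -> eligible for inserts
    }
}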
                            diff --git a/test/framework/src/main/java/org/opensearch/test/client/NoOpClient.java b/test/framework/src/main/java/org/opensearch/test/client/NoOpClient.java
                            index a0e87d5fd7189..45d779b3e8697 100644
                            --- a/test/framework/src/main/java/org/opensearch/test/client/NoOpClient.java
                            +++ b/test/framework/src/main/java/org/opensearch/test/client/NoOpClient.java
                            @@ -47,7 +47,7 @@
                             /**
                              * Client that always responds with {@code null} to every request. Override {@link #doExecute(ActionType, ActionRequest, ActionListener)}
                              * for testing.
                            - *
+ * <p>
                            * See also {@link NoOpNodeClient} if you need to mock a {@link org.opensearch.client.node.NodeClient}. */ public class NoOpClient extends AbstractClient { diff --git a/test/framework/src/main/java/org/opensearch/test/client/NoOpNodeClient.java b/test/framework/src/main/java/org/opensearch/test/client/NoOpNodeClient.java index 7dfe9298e9a92..4e84fe3b91d15 100644 --- a/test/framework/src/main/java/org/opensearch/test/client/NoOpNodeClient.java +++ b/test/framework/src/main/java/org/opensearch/test/client/NoOpNodeClient.java @@ -55,7 +55,7 @@ * Client that always response with {@code null} to every request. Override {@link #doExecute(ActionType, ActionRequest, ActionListener)}, * {@link #executeLocally(ActionType, ActionRequest, ActionListener)}, or {@link #executeLocally(ActionType, ActionRequest, TaskListener)} * for testing. - * + *
<p>
                            * See also {@link NoOpClient} if you do not specifically need a {@link NodeClient}. */ public class NoOpNodeClient extends NodeClient { diff --git a/test/framework/src/main/java/org/opensearch/test/disruption/LongGCDisruption.java b/test/framework/src/main/java/org/opensearch/test/disruption/LongGCDisruption.java index 690e15dd80873..44837c37962b4 100644 --- a/test/framework/src/main/java/org/opensearch/test/disruption/LongGCDisruption.java +++ b/test/framework/src/main/java/org/opensearch/test/disruption/LongGCDisruption.java @@ -248,7 +248,7 @@ public TimeValue expectedTimeToHeal() { /** * resolves all threads belonging to given node and suspends them if their current stack trace * is "safe". Threads are added to nodeThreads if suspended. - * + *
<p>
                            * returns true if some live threads were found. The caller is expected to call this method * until no more "live" are found. */ diff --git a/test/framework/src/main/java/org/opensearch/test/disruption/NetworkDisruption.java b/test/framework/src/main/java/org/opensearch/test/disruption/NetworkDisruption.java index e77b8f5b24897..62e19750a363b 100644 --- a/test/framework/src/main/java/org/opensearch/test/disruption/NetworkDisruption.java +++ b/test/framework/src/main/java/org/opensearch/test/disruption/NetworkDisruption.java @@ -109,7 +109,7 @@ public void ensureHealthy(InternalTestCluster cluster) { /** * Ensures that all nodes in the cluster are connected to each other. - * + *
<p>
                            * Some network disruptions may leave nodes that are not the cluster-manager disconnected from each other. * {@link org.opensearch.cluster.NodeConnectionsService} will eventually reconnect but it's * handy to be able to ensure this happens faster diff --git a/test/framework/src/main/java/org/opensearch/test/gateway/TestGatewayAllocator.java b/test/framework/src/main/java/org/opensearch/test/gateway/TestGatewayAllocator.java index 107e42ce43487..7462062a0cd46 100644 --- a/test/framework/src/main/java/org/opensearch/test/gateway/TestGatewayAllocator.java +++ b/test/framework/src/main/java/org/opensearch/test/gateway/TestGatewayAllocator.java @@ -57,13 +57,13 @@ * A gateway allocator implementation that keeps an in memory list of started shard allocation * that are used as replies to the, normally async, fetch data requests. The in memory list * is adapted when shards are started and failed. - * + *
<p>
                            * Nodes leaving and joining the cluster do not change the list of shards the class tracks but * rather serves as a filter to what is returned by fetch data. Concretely - fetch data will * only return shards that were started on nodes that are currently part of the cluster. - * + *
<p>
                            * For now only primary shard related data is fetched. Replica request always get an empty response. - * + *
<p>
                            * * This class is useful to use in unit tests that require the functionality of {@link GatewayAllocator} but do * not have all the infrastructure required to use it. diff --git a/test/framework/src/main/java/org/opensearch/test/junit/annotations/TestIssueLogging.java b/test/framework/src/main/java/org/opensearch/test/junit/annotations/TestIssueLogging.java index f81c47b6a0a08..97fdcb796f195 100644 --- a/test/framework/src/main/java/org/opensearch/test/junit/annotations/TestIssueLogging.java +++ b/test/framework/src/main/java/org/opensearch/test/junit/annotations/TestIssueLogging.java @@ -43,7 +43,7 @@ /** * Annotation used to set a custom log level when investigating test failures. Do not use this annotation to explicitly * control the logging level in tests; instead, use {@link TestLogging}. - * + *
<p>
                            * It supports multiple logger:level comma-separated key-value pairs of logger:level (e.g., * org.opensearch.cluster.metadata:TRACE). Use the _root keyword to set the root logger level. */ diff --git a/test/framework/src/main/java/org/opensearch/test/junit/annotations/TestLogging.java b/test/framework/src/main/java/org/opensearch/test/junit/annotations/TestLogging.java index d440fae409897..6c0b87ac67354 100644 --- a/test/framework/src/main/java/org/opensearch/test/junit/annotations/TestLogging.java +++ b/test/framework/src/main/java/org/opensearch/test/junit/annotations/TestLogging.java @@ -43,7 +43,7 @@ /** * Annotation used to set a custom log level for controlling logging behavior in tests. Do not use this annotation when * investigating test failures; instead, use {@link TestIssueLogging}. - * + *
<p>
                            * It supports multiple logger:level comma-separated key-value pairs of logger:level (e.g., * org.opensearch.cluster.metadata:TRACE). Use the _root keyword to set the root logger level. */ diff --git a/test/framework/src/main/java/org/opensearch/test/junit/listeners/LoggingListener.java b/test/framework/src/main/java/org/opensearch/test/junit/listeners/LoggingListener.java index 7c94c16b77471..ea2c5d055ed8b 100644 --- a/test/framework/src/main/java/org/opensearch/test/junit/listeners/LoggingListener.java +++ b/test/framework/src/main/java/org/opensearch/test/junit/listeners/LoggingListener.java @@ -53,7 +53,7 @@ * A {@link RunListener} that allows changing the log level for a specific test method. When a test method is annotated with the * {@link TestLogging} annotation, the level for the specified loggers will be internally saved before the test method execution and * overridden with the specified ones. At the end of the test method execution the original loggers levels will be restored. - * + *
<p>
                            * This class is not thread-safe. Given the static nature of the logging API, it assumes that tests are never run concurrently in the same * JVM. For the very same reason no synchronization has been implemented regarding the save/restore process of the original loggers * levels. diff --git a/test/framework/src/main/java/org/opensearch/test/rest/RestActionTestCase.java b/test/framework/src/main/java/org/opensearch/test/rest/RestActionTestCase.java index dc13924195254..a77865579f3b3 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/RestActionTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/RestActionTestCase.java @@ -103,7 +103,7 @@ protected void dispatchRequest(RestRequest request) { /** * A mocked {@link NodeClient} which can be easily reconfigured to verify arbitrary verification * functions, and can be reset to allow reconfiguration partway through a test without having to construct a new object. - * + *
<p>
                            * By default, will throw {@link AssertionError} when any execution method is called, unless configured otherwise using * {@link #setExecuteVerifier(BiFunction)} or {@link #setExecuteLocallyVerifier(BiFunction)}. */ diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/DenylistedPathPatternMatcher.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/DenylistedPathPatternMatcher.java index eeaa76b6ca1b3..3a80f25c5d4ab 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/DenylistedPathPatternMatcher.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/DenylistedPathPatternMatcher.java @@ -35,7 +35,7 @@ /** * Matches denylist patterns. - * + *
<p>
                            * Currently the following syntax is supported: * *
<ul>
                              diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/Features.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/Features.java index 10fb1e52259a9..8e0bc03b08442 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/Features.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/Features.java @@ -41,7 +41,7 @@ * Allows to register additional features supported by the tests runner. * This way any runner can add extra features and use proper skip sections to avoid * breaking others runners till they have implemented the new feature as well. - * + *
<p>
                              * Once all runners have implemented the feature, it can be removed from the list * and the related skip sections can be removed from the tests as well. */ diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/OpenSearchClientYamlSuiteTestCase.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/OpenSearchClientYamlSuiteTestCase.java index c3289e7651056..12599f003077d 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/OpenSearchClientYamlSuiteTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/OpenSearchClientYamlSuiteTestCase.java @@ -78,7 +78,7 @@ /** * Runs a suite of yaml tests shared with all the official OpenSearch * clients against an opensearch cluster. - * + *
<p>
                              * The suite timeout is extended to account for projects with a large number of tests. */ @TimeoutSuite(millis = 30 * TimeUnits.MINUTE) @@ -110,9 +110,9 @@ public abstract class OpenSearchClientYamlSuiteTestCase extends OpenSearchRestTe /** * This separator pattern matches ',' except it is preceded by a '\'. * This allows us to support ',' within paths when it is escaped with a slash. - * + *
<p>
                              * For example, the path string "/a/b/c\,d/e/f,/foo/bar,/baz" is separated to "/a/b/c\,d/e/f", "/foo/bar" and "/baz". - * + *
<p>
                              * For reference, this regular expression feature is known as zero-width negative look-behind. * */ diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/DoSection.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/DoSection.java index ef8bf37d88227..ae1c33b1eb47f 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/DoSection.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/DoSection.java @@ -81,7 +81,7 @@ /** * Represents a do section: - * + *
<p>
                              * - do: * catch: missing * headers: diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/GreaterThanEqualToAssertion.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/GreaterThanEqualToAssertion.java index 2132c2ebab51c..8e929eff44348 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/GreaterThanEqualToAssertion.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/GreaterThanEqualToAssertion.java @@ -47,7 +47,7 @@ /** * Represents a gte assert section: - * + *
<p>
                              * - gte: { fields._ttl: 0 } */ public class GreaterThanEqualToAssertion extends Assertion { diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/IsFalseAssertion.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/IsFalseAssertion.java index 6cfbbfc5df8d5..999486ad04455 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/IsFalseAssertion.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/IsFalseAssertion.java @@ -45,7 +45,7 @@ /** * Represents an is_false assert section: - * + *
<p>
                              * - is_false: get.fields.bar * */ diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/IsTrueAssertion.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/IsTrueAssertion.java index e746542d89126..bf5822406f014 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/IsTrueAssertion.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/IsTrueAssertion.java @@ -46,7 +46,7 @@ /** * Represents an is_true assert section: - * + *
<p>
                              * - is_true: get.fields.bar * */ diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/LessThanAssertion.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/LessThanAssertion.java index 263e0c8fb9c42..d6e2ae1e23996 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/LessThanAssertion.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/LessThanAssertion.java @@ -46,7 +46,7 @@ /** * Represents a lt assert section: - * + *
<p>
                              * - lt: { fields._ttl: 20000} * */ diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/LessThanOrEqualToAssertion.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/LessThanOrEqualToAssertion.java index f0e7d0c01b8f0..ee46c04496f32 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/LessThanOrEqualToAssertion.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/LessThanOrEqualToAssertion.java @@ -47,7 +47,7 @@ /** * Represents a lte assert section: - * + *
<p>
                              * - lte: { fields._ttl: 0 } */ public class LessThanOrEqualToAssertion extends Assertion { diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/MatchAssertion.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/MatchAssertion.java index a97e3e4cb77ed..77d8f3154729e 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/MatchAssertion.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/MatchAssertion.java @@ -50,7 +50,7 @@ /** * Represents a match assert section: - * + *
<p>
                              * - match: { get.fields._routing: "5" } * */ diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/SetSection.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/SetSection.java index a561a53119a96..c8004d9807cb9 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/SetSection.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/SetSection.java @@ -42,7 +42,7 @@ /** * Represents a set section: - * + *
<p>
                              * - set: {_scroll_id: scroll_id} * */ From ed1b624481eb9c0cc2fd941b54eb0243807b399d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Oct 2023 07:31:44 -0400 Subject: [PATCH 13/33] Bump de.thetaphi:forbiddenapis from 3.5.1 to 3.6 in /buildSrc (#10508) * Bump de.thetaphi:forbiddenapis from 3.5.1 to 3.6 in /buildSrc Bumps de.thetaphi:forbiddenapis from 3.5.1 to 3.6. --- updated-dependencies: - dependency-name: de.thetaphi:forbiddenapis dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 1 + buildSrc/build.gradle | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 467e019270a47..572a8346a0686 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -122,6 +122,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `org.apache.avro:avro` from 1.11.2 to 1.11.3 ([#10210](https://github.com/opensearch-project/OpenSearch/pull/10210)) - Bump `org.bouncycastle:bc-fips` from 1.0.2.3 to 1.0.2.4 ([#10297](https://github.com/opensearch-project/OpenSearch/pull/10297)) - Bump `org.apache.zookeeper:zookeeper` from 3.9.0 to 3.9.1 ([#10506](https://github.com/opensearch-project/OpenSearch/pull/10506)) +- Bump `de.thetaphi:forbiddenapis` from 3.5.1 to 3.6 ([#10508](https://github.com/opensearch-project/OpenSearch/pull/10508)) ### Changed - Add instrumentation in rest and network layer. ([#9415](https://github.com/opensearch-project/OpenSearch/pull/9415)) diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index ccf6a6010ff63..6d3e0f018657e 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -114,7 +114,7 @@ dependencies { api 'com.github.johnrengelman:shadow:8.1.1' api 'org.jdom:jdom2:2.0.6.1' api "org.jetbrains.kotlin:kotlin-stdlib-jdk8:${props.getProperty('kotlin')}" - api 'de.thetaphi:forbiddenapis:3.5.1' + api 'de.thetaphi:forbiddenapis:3.6' api 'com.avast.gradle:gradle-docker-compose-plugin:0.16.12' api "org.yaml:snakeyaml:${props.getProperty('snakeyaml')}" api 'org.apache.maven:maven-model:3.9.4' From 9bcd7eac2b3fde6953f74ea3ecfcb1a6a51404d3 Mon Sep 17 00:00:00 2001 From: Gaurav Bafna <85113518+gbbafna@users.noreply.github.com> Date: Wed, 11 Oct 2023 18:38:08 +0530 Subject: [PATCH 14/33] [S3 Repository] Make bulk delete size configurable (#10445) Signed-off-by: Gaurav Bafna --- .../opensearch/repositories/s3/S3BlobContainer.java | 11 ++--------- .../org/opensearch/repositories/s3/S3BlobStore.java | 10 ++++++++++ .../org/opensearch/repositories/s3/S3Repository.java | 11 +++++++++++ .../s3/S3BlobContainerMockClientTests.java | 2 ++ .../repositories/s3/S3BlobContainerRetriesTests.java | 2 ++ .../repositories/s3/S3BlobStoreContainerTests.java | 7 +++++++ 6 files changed, 34 insertions(+), 9 deletions(-) diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java index 9ffdba5eaae3a..9777bd974d56c 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java @@ -113,13 +113,6 @@ 
class S3BlobContainer extends AbstractBlobContainer implements AsyncMultiStreamB private static final Logger logger = LogManager.getLogger(S3BlobContainer.class); - /** - * Maximum number of deletes in a {@link DeleteObjectsRequest}. - * - * @see S3 Documentation. - */ - private static final int MAX_BULK_DELETES = 1000; - private final S3BlobStore blobStore; private final String keyPath; @@ -339,12 +332,12 @@ private void doDeleteBlobs(List<String> blobNames, boolean relative) throws IOEx outstanding = new HashSet<>(blobNames); } try (AmazonS3Reference clientReference = blobStore.clientReference()) { - // S3 API only allows 1k blobs per delete so we split up the given blobs into requests of max. 1k deletes + // S3 API allows 1k blobs per delete so we split up the given blobs into requests of bulk size deletes final List<DeleteObjectsRequest> deleteRequests = new ArrayList<>(); final List<String> partition = new ArrayList<>(); for (String key : outstanding) { partition.add(key); - if (partition.size() == MAX_BULK_DELETES) { + if (partition.size() == blobStore.getBulkDeletesSize()) { deleteRequests.add(bulkDelete(blobStore.bucket(), partition)); partition.clear(); } diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java index 3dd373b5b9f32..80005d92344a4 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java @@ -52,6 +52,7 @@ import static org.opensearch.repositories.s3.S3Repository.BUCKET_SETTING; import static org.opensearch.repositories.s3.S3Repository.BUFFER_SIZE_SETTING; +import static org.opensearch.repositories.s3.S3Repository.BULK_DELETE_SIZE; import static org.opensearch.repositories.s3.S3Repository.CANNED_ACL_SETTING; import static org.opensearch.repositories.s3.S3Repository.SERVER_SIDE_ENCRYPTION_SETTING; import static org.opensearch.repositories.s3.S3Repository.STORAGE_CLASS_SETTING; @@ -74,6 +75,8 @@ class S3BlobStore implements BlobStore { private volatile StorageClass storageClass; + private volatile int bulkDeletesSize; + private volatile RepositoryMetadata repositoryMetadata; private final StatsMetricPublisher statsMetricPublisher = new StatsMetricPublisher(); @@ -92,6 +95,7 @@ class S3BlobStore implements BlobStore { ByteSizeValue bufferSize, String cannedACL, String storageClass, + int bulkDeletesSize, RepositoryMetadata repositoryMetadata, AsyncTransferManager asyncTransferManager, AsyncExecutorContainer priorityExecutorBuilder, @@ -105,6 +109,7 @@ class S3BlobStore implements BlobStore { this.bufferSize = bufferSize; this.cannedACL = initCannedACL(cannedACL); this.storageClass = initStorageClass(storageClass); + this.bulkDeletesSize = bulkDeletesSize; this.repositoryMetadata = repositoryMetadata; this.asyncTransferManager = asyncTransferManager; this.normalExecutorBuilder = normalExecutorBuilder; @@ -119,6 +124,7 @@ public void reload(RepositoryMetadata repositoryMetadata) { this.bufferSize = BUFFER_SIZE_SETTING.get(repositoryMetadata.settings()); this.cannedACL = initCannedACL(CANNED_ACL_SETTING.get(repositoryMetadata.settings())); this.storageClass = initStorageClass(STORAGE_CLASS_SETTING.get(repositoryMetadata.settings())); + this.bulkDeletesSize = BULK_DELETE_SIZE.get(repositoryMetadata.settings()); } @Override @@ -150,6 +156,10 @@ public long bufferSizeInBytes() { return bufferSize.getBytes(); } + public int getBulkDeletesSize() { + return
bulkDeletesSize; + } + @Override public BlobContainer blobContainer(BlobPath path) { return new S3BlobContainer(path, this); diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java index bbc3451be1535..aaf5b79891cdc 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java @@ -190,6 +190,13 @@ class S3Repository extends MeteredBlobStoreRepository { new ByteSizeValue(5, ByteSizeUnit.TB) ); + /** + * Maximum number of deletes in a DeleteObjectsRequest. + * + * @see S3 Documentation. + */ + static final Setting<Integer> BULK_DELETE_SIZE = Setting.intSetting("bulk_delete_size", 1000, 1, 1000); + /** * Sets the S3 storage class type for the backup files. Values may be standard, reduced_redundancy, * standard_ia, onezone_ia and intelligent_tiering. Defaults to standard. @@ -231,6 +238,8 @@ class S3Repository extends MeteredBlobStoreRepository { private final AsyncExecutorContainer normalExecutorBuilder; private final Path pluginConfigPath; + private volatile int bulkDeletesSize; + // Used by test classes S3Repository( final RepositoryMetadata metadata, @@ -340,6 +349,7 @@ protected S3BlobStore createBlobStore() { bufferSize, cannedACL, storageClass, + bulkDeletesSize, metadata, asyncUploadUtils, priorityExecutorBuilder, @@ -399,6 +409,7 @@ private void readRepositoryMetadata() { this.serverSideEncryption = SERVER_SIDE_ENCRYPTION_SETTING.get(metadata.settings()); this.storageClass = STORAGE_CLASS_SETTING.get(metadata.settings()); this.cannedACL = CANNED_ACL_SETTING.get(metadata.settings()); + this.bulkDeletesSize = BULK_DELETE_SIZE.get(metadata.settings()); if (S3ClientSettings.checkDeprecatedCredentials(metadata.settings())) { // provided repository settings deprecationLogger.deprecate( diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerMockClientTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerMockClientTests.java index 8c8524212e08e..6eb8faa746d34 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerMockClientTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerMockClientTests.java @@ -64,6 +64,7 @@ import org.mockito.invocation.InvocationOnMock; +import static org.opensearch.repositories.s3.S3Repository.BULK_DELETE_SIZE; import static org.mockito.Mockito.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; @@ -387,6 +388,7 @@ private S3BlobStore createBlobStore() { S3Repository.BUFFER_SIZE_SETTING.getDefault(Settings.EMPTY), S3Repository.CANNED_ACL_SETTING.getDefault(Settings.EMPTY), S3Repository.STORAGE_CLASS_SETTING.getDefault(Settings.EMPTY), + BULK_DELETE_SIZE.get(Settings.EMPTY), repositoryMetadata, new AsyncTransferManager( S3Repository.PARALLEL_MULTIPART_UPLOAD_MINIMUM_PART_SIZE_SETTING.getDefault(Settings.EMPTY).getBytes(), diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerRetriesTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerRetriesTests.java index ecad68474b601..a2214f5218991 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerRetriesTests.java +++
b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerRetriesTests.java @@ -95,6 +95,7 @@ import static org.opensearch.repositories.s3.S3ClientSettings.MAX_RETRIES_SETTING; import static org.opensearch.repositories.s3.S3ClientSettings.READ_TIMEOUT_SETTING; import static org.opensearch.repositories.s3.S3ClientSettings.REGION; +import static org.opensearch.repositories.s3.S3Repository.BULK_DELETE_SIZE; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -215,6 +216,7 @@ protected AsyncMultiStreamBlobContainer createBlobContainer( bufferSize == null ? S3Repository.BUFFER_SIZE_SETTING.getDefault(Settings.EMPTY) : bufferSize, S3Repository.CANNED_ACL_SETTING.getDefault(Settings.EMPTY), S3Repository.STORAGE_CLASS_SETTING.getDefault(Settings.EMPTY), + BULK_DELETE_SIZE.get(Settings.EMPTY), repositoryMetadata, new AsyncTransferManager( S3Repository.PARALLEL_MULTIPART_UPLOAD_MINIMUM_PART_SIZE_SETTING.getDefault(Settings.EMPTY).getBytes(), diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java index e266bba372d80..2701cae6a733b 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java @@ -276,10 +276,12 @@ public void testDelete() throws IOException { final String bucketName = randomAlphaOfLengthBetween(1, 10); final BlobPath blobPath = new BlobPath(); + int bulkDeleteSize = 5; final S3BlobStore blobStore = mock(S3BlobStore.class); when(blobStore.bucket()).thenReturn(bucketName); when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher()); + when(blobStore.getBulkDeletesSize()).thenReturn(bulkDeleteSize); final S3Client client = mock(S3Client.class); doAnswer(invocation -> new AmazonS3Reference(client)).when(blobStore).clientReference(); @@ -297,8 +299,11 @@ public void testDelete() throws IOException { when(client.listObjectsV2Paginator(any(ListObjectsV2Request.class))).thenReturn(listObjectsV2Iterable); final List<String> keysDeleted = new ArrayList<>(); + AtomicInteger deleteCount = new AtomicInteger(); doAnswer(invocation -> { DeleteObjectsRequest deleteObjectsRequest = invocation.getArgument(0); + deleteCount.getAndIncrement(); + logger.info("Object sizes are{}", deleteObjectsRequest.delete().objects().size()); keysDeleted.addAll(deleteObjectsRequest.delete().objects().stream().map(ObjectIdentifier::key).collect(Collectors.toList())); return DeleteObjectsResponse.builder().build(); }).when(client).deleteObjects(any(DeleteObjectsRequest.class)); @@ -311,6 +316,8 @@ public void testDelete() throws IOException { // keysDeleted will have blobPath also assertEquals(listObjectsV2ResponseIterator.getKeysListed().size(), keysDeleted.size() - 1); assertTrue(keysDeleted.contains(blobPath.buildAsString())); + // keysDeleted will have blobPath also + assertEquals((int) Math.ceil(((double) keysDeleted.size() + 1) / bulkDeleteSize), deleteCount.get()); keysDeleted.remove(blobPath.buildAsString()); assertEquals(new HashSet<>(listObjectsV2ResponseIterator.getKeysListed()), new HashSet<>(keysDeleted)); } From c4f5dd9f4d45530ad22f165f5ec88736534e5827 Mon Sep 17 00:00:00 2001 From: Ticheng Lin <51488860+ticheng-aws@users.noreply.github.com> Date: Wed, 11 Oct 2023 07:31:18
-0700 Subject: [PATCH 15/33] Fix flaky query profile phase tests with concurrent search enabled (#10547) (#10547) Signed-off-by: Ticheng Lin --- CHANGELOG.md | 2 +- .../search/query/QueryProfilePhaseTests.java | 22 +++++-------------- 2 files changed, 6 insertions(+), 18 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 572a8346a0686..2e107d2c04539 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -158,4 +158,4 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Security [Unreleased 3.0]: https://github.com/opensearch-project/OpenSearch/compare/2.x...HEAD -[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.11...2.x \ No newline at end of file +[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.11...2.x diff --git a/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java b/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java index 92d27032b62e3..28af8a63cfba8 100644 --- a/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java +++ b/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java @@ -1066,10 +1066,6 @@ public void testIndexSortScrollOptimization() throws Exception { } public void testDisableTopScoreCollection() throws Exception { - assumeFalse( - "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/10469", - executor != null - ); Directory dir = newDirectory(); IndexWriterConfig iwc = newIndexWriterConfig(new StandardAnalyzer()); RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); @@ -1108,10 +1104,10 @@ public void testDisableTopScoreCollection() throws Exception { assertThat(query.getTimeBreakdown().get("score_count"), greaterThan(0L)); if (executor != null) { assertThat(query.getTimeBreakdown().get("max_score"), greaterThan(0L)); - assertThat(query.getTimeBreakdown().get("min_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("min_score"), greaterThanOrEqualTo(0L)); assertThat(query.getTimeBreakdown().get("avg_score"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("max_score_count"), greaterThan(0L)); - assertThat(query.getTimeBreakdown().get("min_score_count"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("min_score_count"), greaterThanOrEqualTo(0L)); assertThat(query.getTimeBreakdown().get("avg_score_count"), greaterThan(0L)); } assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); @@ -1241,10 +1237,6 @@ public void testMinScore() throws Exception { } public void testMaxScore() throws Exception { - assumeFalse( - "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/9932", - executor != null - ); Directory dir = newDirectory(); final Sort sort = new Sort(new SortField("filter", SortField.Type.STRING)); IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort); @@ -1286,7 +1278,7 @@ public void testMaxScore() throws Exception { assertThat(query.getTimeBreakdown().get("max_score"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("min_score"), greaterThanOrEqualTo(0L)); assertThat(query.getTimeBreakdown().get("avg_score"), greaterThan(0L)); - assertThat(query.getTimeBreakdown().get("max_score_count"), greaterThanOrEqualTo(6L)); + assertThat(query.getTimeBreakdown().get("max_score_count"), greaterThanOrEqualTo(4L)); assertThat(query.getTimeBreakdown().get("min_score_count"), greaterThanOrEqualTo(0L)); 
assertThat(query.getTimeBreakdown().get("avg_score_count"), greaterThanOrEqualTo(1L)); } @@ -1364,10 +1356,6 @@ public void testMaxScore() throws Exception { } public void testCollapseQuerySearchResults() throws Exception { - assumeFalse( - "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/10139", - executor != null - ); Directory dir = newDirectory(); final Sort sort = new Sort(new SortField("user", SortField.Type.INT)); IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort); @@ -1412,7 +1400,7 @@ public void testCollapseQuerySearchResults() throws Exception { assertThat(query.getTimeBreakdown().get("min_score"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("avg_score"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("max_score_count"), greaterThanOrEqualTo(6L)); - assertThat(query.getTimeBreakdown().get("min_score_count"), greaterThanOrEqualTo(6L)); + assertThat(query.getTimeBreakdown().get("min_score_count"), greaterThanOrEqualTo(2L)); assertThat(query.getTimeBreakdown().get("avg_score_count"), greaterThanOrEqualTo(6L)); } assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); @@ -1447,7 +1435,7 @@ public void testCollapseQuerySearchResults() throws Exception { assertThat(query.getTimeBreakdown().get("min_score"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("avg_score"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("max_score_count"), greaterThanOrEqualTo(6L)); - assertThat(query.getTimeBreakdown().get("min_score_count"), greaterThanOrEqualTo(6L)); + assertThat(query.getTimeBreakdown().get("min_score_count"), greaterThanOrEqualTo(2L)); assertThat(query.getTimeBreakdown().get("avg_score_count"), greaterThanOrEqualTo(6L)); } assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); From 489de2a49a51e6a9a82fbfeefe29bc587b36206b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Oct 2023 11:38:15 -0400 Subject: [PATCH 16/33] Bump commons-io:commons-io from 2.13.0 to 2.14.0 in /plugins/ingest-attachment (#10294) * Bump commons-io:commons-io in /plugins/ingest-attachment Bumps commons-io:commons-io from 2.13.0 to 2.14.0. --- updated-dependencies: - dependency-name: commons-io:commons-io dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 1 + plugins/ingest-attachment/build.gradle | 2 +- plugins/ingest-attachment/licenses/commons-io-2.13.0.jar.sha1 | 1 - plugins/ingest-attachment/licenses/commons-io-2.14.0.jar.sha1 | 1 + 4 files changed, 3 insertions(+), 2 deletions(-) delete mode 100644 plugins/ingest-attachment/licenses/commons-io-2.13.0.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/commons-io-2.14.0.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 2e107d2c04539..563f3e43e9707 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -123,6 +123,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `org.bouncycastle:bc-fips` from 1.0.2.3 to 1.0.2.4 ([#10297](https://github.com/opensearch-project/OpenSearch/pull/10297)) - Bump `org.apache.zookeeper:zookeeper` from 3.9.0 to 3.9.1 ([#10506](https://github.com/opensearch-project/OpenSearch/pull/10506)) - Bump `de.thetaphi:forbiddenapis` from 3.5.1 to 3.6 ([#10508](https://github.com/opensearch-project/OpenSearch/pull/10508)) +- Bump `commons-io:commons-io` from 2.13.0 to 2.14.0 ([#10294](https://github.com/opensearch-project/OpenSearch/pull/10294)) ### Changed - Add instrumentation in rest and network layer. ([#9415](https://github.com/opensearch-project/OpenSearch/pull/9415)) diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle index 330a17c02bc7a..8945c09fca28b 100644 --- a/plugins/ingest-attachment/build.gradle +++ b/plugins/ingest-attachment/build.gradle @@ -57,7 +57,7 @@ dependencies { runtimeOnly "com.google.guava:guava:${versions.guava}" // Other dependencies api 'org.tukaani:xz:1.9' - api 'commons-io:commons-io:2.13.0' + api 'commons-io:commons-io:2.14.0' api "org.slf4j:slf4j-api:${versions.slf4j}" // character set detection diff --git a/plugins/ingest-attachment/licenses/commons-io-2.13.0.jar.sha1 b/plugins/ingest-attachment/licenses/commons-io-2.13.0.jar.sha1 deleted file mode 100644 index c165136eb5822..0000000000000 --- a/plugins/ingest-attachment/licenses/commons-io-2.13.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8bb2bc9b4df17e2411533a0708a69f983bf5e83b \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/commons-io-2.14.0.jar.sha1 b/plugins/ingest-attachment/licenses/commons-io-2.14.0.jar.sha1 new file mode 100644 index 0000000000000..33c5cfe53e01d --- /dev/null +++ b/plugins/ingest-attachment/licenses/commons-io-2.14.0.jar.sha1 @@ -0,0 +1 @@ +a4c6e1f6c196339473cd2e1b037f0eb97c62755b \ No newline at end of file From c55e1b47bffd40defc86686e9c6961d32dd087c2 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Wed, 11 Oct 2023 14:03:49 -0400 Subject: [PATCH 17/33] Add the means to extract the contextual properties from HttpChannel, TcpChannel and TrasportChannel without excessive typecasting (#10562) * Add the means to extract the contextual properties from HttpChannel, TcpChannel and TrasportChannel without excessive typecasting Signed-off-by: Andriy Redko * Address code review comments Signed-off-by: Andriy Redko --------- Signed-off-by: Andriy Redko --- CHANGELOG.md | 1 + .../http/netty4/Netty4HttpChannel.java | 17 +++++++++++++++++ .../transport/netty4/Netty4TcpChannel.java | 13 +++++++++++++ .../java/org/opensearch/http/HttpChannel.java | 14 
++++++++++++++ .../tracing/channels/TraceableHttpChannel.java | 6 ++++++ .../channels/TraceableTcpTransportChannel.java | 6 ++++++ .../transport/TaskTransportChannel.java | 6 ++++++ .../org/opensearch/transport/TcpChannel.java | 15 +++++++++++++++ .../transport/TcpTransportChannel.java | 5 +++++ .../opensearch/transport/TransportChannel.java | 15 +++++++++++++++ 10 files changed, 98 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 563f3e43e9707..5e31085eb3935 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -139,6 +139,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [Remote Store] Add support to reload repository metadata inplace ([#9569](https://github.com/opensearch-project/OpenSearch/pull/9569)) - [Metrics Framework] Add Metrics framework. ([#10241](https://github.com/opensearch-project/OpenSearch/pull/10241)) - Updating the separator for RemoteStoreLockManager since underscore is allowed in base64UUID url charset ([#10379](https://github.com/opensearch-project/OpenSearch/pull/10379)) +- Add the means to extract the contextual properties from HttpChannel, TcpCChannel and TrasportChannel without excessive typecasting ([#10562](https://github.com/opensearch-project/OpenSearch/pull/10562)) ### Deprecated diff --git a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpChannel.java b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpChannel.java index a83330356e35e..6475a0b744c60 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpChannel.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpChannel.java @@ -40,6 +40,7 @@ import org.opensearch.transport.netty4.Netty4TcpChannel; import java.net.InetSocketAddress; +import java.util.Optional; import io.netty.channel.Channel; import io.netty.channel.ChannelPipeline; @@ -98,6 +99,22 @@ public Channel getNettyChannel() { return channel; } + @SuppressWarnings("unchecked") + @Override + public <T> Optional<T> get(String name, Class<T> clazz) { + Object handler = getNettyChannel().pipeline().get(name); + + if (handler == null && inboundPipeline() != null) { + handler = inboundPipeline().get(name); + } + + if (handler != null && clazz.isInstance(handler) == true) { + return Optional.of((T) handler); + } + + return Optional.empty(); + } + @Override public String toString() { return "Netty4HttpChannel{" + "localAddress=" + getLocalAddress() + ", remoteAddress=" + getRemoteAddress() + '}'; diff --git a/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/Netty4TcpChannel.java b/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/Netty4TcpChannel.java index 5db1f7c333157..79a5bf9e95121 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/Netty4TcpChannel.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/Netty4TcpChannel.java @@ -41,6 +41,7 @@ import org.opensearch.transport.TransportException; import java.net.InetSocketAddress; +import java.util.Optional; import io.netty.channel.Channel; import io.netty.channel.ChannelFuture; @@ -164,6 +165,18 @@ public void sendMessage(BytesReference reference, ActionListener<Void> listener) } } + @SuppressWarnings("unchecked") + @Override + public <T> Optional<T> get(String name, Class<T> clazz) { + final Object handler = getNettyChannel().pipeline().get(name); + + if (handler != null && clazz.isInstance(handler) == true) { + return Optional.of((T) handler); + } + + return
Optional.empty(); + } + public Channel getNettyChannel() { return channel; } diff --git a/server/src/main/java/org/opensearch/http/HttpChannel.java b/server/src/main/java/org/opensearch/http/HttpChannel.java index 6dcdaf9034413..679a5d73c7837 100644 --- a/server/src/main/java/org/opensearch/http/HttpChannel.java +++ b/server/src/main/java/org/opensearch/http/HttpChannel.java @@ -36,6 +36,7 @@ import org.opensearch.core.action.ActionListener; import java.net.InetSocketAddress; +import java.util.Optional; /** * Represents an HTTP comms channel @@ -72,4 +73,17 @@ default void handleException(Exception ex) {} */ InetSocketAddress getRemoteAddress(); + /** + * Returns the contextual property associated with this specific HTTP channel (the + * implementation of how such properties are managed depends on the the particular + * transport engine). + * + * @param name the name of the property + * @param clazz the expected type of the property + * + * @return the value of the property + */ + default <T> Optional<T> get(String name, Class<T> clazz) { + return Optional.empty(); + } } diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableHttpChannel.java b/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableHttpChannel.java index 03848e8e58207..0a9757310fe8b 100644 --- a/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableHttpChannel.java +++ b/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableHttpChannel.java @@ -18,6 +18,7 @@ import java.net.InetSocketAddress; import java.util.Objects; +import java.util.Optional; /** * Tracer wrapped {@link HttpChannel} @@ -92,4 +93,9 @@ public InetSocketAddress getLocalAddress() { public InetSocketAddress getRemoteAddress() { return delegate.getRemoteAddress(); } + + @Override + public <T> Optional<T> get(String name, Class<T> clazz) { + return delegate.get(name, clazz); + } } diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableTcpTransportChannel.java b/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableTcpTransportChannel.java index 333e06eb037cb..bd60c35c3baac 100644 --- a/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableTcpTransportChannel.java +++ b/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableTcpTransportChannel.java @@ -20,6 +20,7 @@ import org.opensearch.transport.TransportChannel; import java.io.IOException; +import java.util.Optional; /** * Tracer wrapped {@link TransportChannel} @@ -109,4 +110,9 @@ public void sendResponse(Exception exception) throws IOException { public Version getVersion() { return delegate.getVersion(); } + + @Override + public <T> Optional<T> get(String name, Class<T> clazz) { + return delegate.get(name, clazz); + } } diff --git a/server/src/main/java/org/opensearch/transport/TaskTransportChannel.java b/server/src/main/java/org/opensearch/transport/TaskTransportChannel.java index 052611317f174..4dab0039ec878 100644 --- a/server/src/main/java/org/opensearch/transport/TaskTransportChannel.java +++ b/server/src/main/java/org/opensearch/transport/TaskTransportChannel.java @@ -37,6 +37,7 @@ import org.opensearch.core.transport.TransportResponse; import java.io.IOException; +import java.util.Optional; /** * Transport channel for tasks @@ -89,4 +90,9 @@ public Version getVersion() { public TransportChannel getChannel() { return channel; } + + @Override + public <T> Optional<T> get(String name, Class<T> clazz) { + return getChannel().get(name, clazz); + } } diff --git
a/server/src/main/java/org/opensearch/transport/TcpChannel.java b/server/src/main/java/org/opensearch/transport/TcpChannel.java index eac137ec30f1a..f98b65d0a4df1 100644 --- a/server/src/main/java/org/opensearch/transport/TcpChannel.java +++ b/server/src/main/java/org/opensearch/transport/TcpChannel.java @@ -38,6 +38,7 @@ import org.opensearch.core.common.bytes.BytesReference; import java.net.InetSocketAddress; +import java.util.Optional; /** * This is a tcp channel representing a single channel connection to another node. It is the base channel @@ -96,6 +97,20 @@ public interface TcpChannel extends CloseableChannel { */ ChannelStats getChannelStats(); + /** + * Returns the contextual property associated with this specific TCP channel (the + * implementation of how such properties are managed depends on the the particular + * transport engine). + * + * @param name the name of the property + * @param clazz the expected type of the property + * + * @return the value of the property + */ + default <T> Optional<T> get(String name, Class<T> clazz) { + return Optional.empty(); + } + /** * Channel statistics * diff --git a/server/src/main/java/org/opensearch/transport/TcpTransportChannel.java b/server/src/main/java/org/opensearch/transport/TcpTransportChannel.java index a7bedcf93e129..81de0af07ea7c 100644 --- a/server/src/main/java/org/opensearch/transport/TcpTransportChannel.java +++ b/server/src/main/java/org/opensearch/transport/TcpTransportChannel.java @@ -38,6 +38,7 @@ import org.opensearch.search.query.QuerySearchResult; import java.io.IOException; +import java.util.Optional; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; @@ -130,4 +131,8 @@ public Version getVersion() { return version; } + @Override + public <T> Optional<T> get(String name, Class<T> clazz) { + return getChannel().get(name, clazz); + } } diff --git a/server/src/main/java/org/opensearch/transport/TransportChannel.java b/server/src/main/java/org/opensearch/transport/TransportChannel.java index 3c582127f28e8..7423d59103302 100644 --- a/server/src/main/java/org/opensearch/transport/TransportChannel.java +++ b/server/src/main/java/org/opensearch/transport/TransportChannel.java @@ -39,6 +39,7 @@ import org.opensearch.core.transport.TransportResponse; import java.io.IOException; +import java.util.Optional; /** * A transport channel allows to send a response to a request on the channel. @@ -78,4 +79,18 @@ static void sendErrorResponse(TransportChannel channel, String actionName, Trans ); } } + + /** + * Returns the contextual property associated with this specific transport channel (the + * implementation of how such properties are managed depends on the the particular + * transport engine). + * + * @param name the name of the property + * @param clazz the expected type of the property + * + * @return the value of the property.
+ */ + default <T> Optional<T> get(String name, Class<T> clazz) { + return Optional.empty(); + } } From a54108c6ef378dbbe432e6248240d4f1afd74047 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Wed, 11 Oct 2023 14:04:18 -0400 Subject: [PATCH 18/33] Bump netty from 4.1.99.Final to 4.1.100.Final (#10564) Signed-off-by: Andriy Redko --- CHANGELOG.md | 1 + buildSrc/version.properties | 2 +- .../licenses/netty-buffer-4.1.100.Final.jar.sha1 | 1 + .../licenses/netty-buffer-4.1.99.Final.jar.sha1 | 1 - .../licenses/netty-codec-4.1.100.Final.jar.sha1 | 1 + .../transport-netty4/licenses/netty-codec-4.1.99.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.100.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.99.Final.jar.sha1 | 1 - .../licenses/netty-codec-http2-4.1.100.Final.jar.sha1 | 1 + .../licenses/netty-codec-http2-4.1.99.Final.jar.sha1 | 1 - .../licenses/netty-common-4.1.100.Final.jar.sha1 | 1 + .../licenses/netty-common-4.1.99.Final.jar.sha1 | 1 - .../licenses/netty-handler-4.1.100.Final.jar.sha1 | 1 + .../licenses/netty-handler-4.1.99.Final.jar.sha1 | 1 - .../licenses/netty-resolver-4.1.100.Final.jar.sha1 | 1 + .../licenses/netty-resolver-4.1.99.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.100.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.99.Final.jar.sha1 | 1 - .../netty-transport-native-unix-common-4.1.100.Final.jar.sha1 | 1 + .../netty-transport-native-unix-common-4.1.99.Final.jar.sha1 | 1 - .../licenses/netty-codec-dns-4.1.100.Final.jar.sha1 | 1 + .../licenses/netty-codec-dns-4.1.99.Final.jar.sha1 | 1 - .../licenses/netty-codec-http2-4.1.100.Final.jar.sha1 | 1 + .../licenses/netty-codec-http2-4.1.99.Final.jar.sha1 | 1 - .../licenses/netty-codec-socks-4.1.100.Final.jar.sha1 | 1 + .../licenses/netty-codec-socks-4.1.99.Final.jar.sha1 | 1 - .../licenses/netty-handler-proxy-4.1.100.Final.jar.sha1 | 1 + .../licenses/netty-handler-proxy-4.1.99.Final.jar.sha1 | 1 - .../licenses/netty-resolver-dns-4.1.100.Final.jar.sha1 | 1 + .../licenses/netty-resolver-dns-4.1.99.Final.jar.sha1 | 1 - .../netty-transport-native-unix-common-4.1.100.Final.jar.sha1 | 1 + .../netty-transport-native-unix-common-4.1.99.Final.jar.sha1 | 1 - .../repository-hdfs/licenses/netty-all-4.1.100.Final.jar.sha1 | 1 + .../repository-hdfs/licenses/netty-all-4.1.99.Final.jar.sha1 | 1 - .../repository-s3/licenses/netty-buffer-4.1.100.Final.jar.sha1 | 1 + .../repository-s3/licenses/netty-buffer-4.1.99.Final.jar.sha1 | 1 - .../repository-s3/licenses/netty-codec-4.1.100.Final.jar.sha1 | 1 + .../repository-s3/licenses/netty-codec-4.1.99.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.100.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.99.Final.jar.sha1 | 1 - .../licenses/netty-codec-http2-4.1.100.Final.jar.sha1 | 1 + .../licenses/netty-codec-http2-4.1.99.Final.jar.sha1 | 1 - .../repository-s3/licenses/netty-common-4.1.100.Final.jar.sha1 | 1 + .../repository-s3/licenses/netty-common-4.1.99.Final.jar.sha1 | 1 - .../repository-s3/licenses/netty-handler-4.1.100.Final.jar.sha1 | 1 + .../repository-s3/licenses/netty-handler-4.1.99.Final.jar.sha1 | 1 - .../licenses/netty-resolver-4.1.100.Final.jar.sha1 | 1 + .../repository-s3/licenses/netty-resolver-4.1.99.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.100.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.99.Final.jar.sha1 | 1 - .../netty-transport-classes-epoll-4.1.100.Final.jar.sha1 | 1 + .../netty-transport-classes-epoll-4.1.99.Final.jar.sha1 | 1 - .../netty-transport-native-unix-common-4.1.100.Final.jar.sha1 | 1 +
.../netty-transport-native-unix-common-4.1.99.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-buffer-4.1.100.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-buffer-4.1.99.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-codec-4.1.100.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-codec-4.1.99.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.100.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.99.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-common-4.1.100.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-common-4.1.99.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-handler-4.1.100.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-handler-4.1.99.Final.jar.sha1 | 1 - .../licenses/netty-resolver-4.1.100.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-resolver-4.1.99.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.100.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.99.Final.jar.sha1 | 1 - 68 files changed, 35 insertions(+), 34 deletions(-) create mode 100644 modules/transport-netty4/licenses/netty-buffer-4.1.100.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-buffer-4.1.99.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-4.1.100.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-4.1.99.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-http-4.1.100.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-http-4.1.99.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-http2-4.1.100.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-common-4.1.100.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-common-4.1.99.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-handler-4.1.100.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-handler-4.1.99.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-resolver-4.1.100.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-resolver-4.1.99.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-transport-4.1.100.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-transport-4.1.99.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-dns-4.1.100.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-codec-dns-4.1.99.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-http2-4.1.100.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-socks-4.1.100.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-codec-socks-4.1.99.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-handler-proxy-4.1.100.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-handler-proxy-4.1.99.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-resolver-dns-4.1.100.Final.jar.sha1 delete mode 100644 
plugins/repository-azure/licenses/netty-resolver-dns-4.1.99.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/netty-all-4.1.100.Final.jar.sha1 delete mode 100644 plugins/repository-hdfs/licenses/netty-all-4.1.99.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-buffer-4.1.100.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-buffer-4.1.99.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-codec-4.1.100.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-codec-4.1.99.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-codec-http-4.1.100.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-codec-http-4.1.99.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-codec-http2-4.1.100.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-common-4.1.100.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-common-4.1.99.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-handler-4.1.100.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-handler-4.1.99.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-resolver-4.1.100.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-resolver-4.1.99.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-transport-4.1.100.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-transport-4.1.99.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.100.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.99.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-buffer-4.1.100.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-buffer-4.1.99.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-codec-4.1.100.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-codec-4.1.99.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-codec-http-4.1.100.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-codec-http-4.1.99.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-common-4.1.100.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-common-4.1.99.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-handler-4.1.100.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-handler-4.1.99.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-resolver-4.1.100.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-resolver-4.1.99.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-transport-4.1.100.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-transport-4.1.99.Final.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 
5e31085eb3935..6055af47403bc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -46,6 +46,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump JNA version from 5.5 to 5.13 ([#9963](https://github.com/opensearch-project/OpenSearch/pull/9963)) - Bumps jetty version to 9.4.52.v20230823 to fix GMS-2023-1857 ([#9822](https://github.com/opensearch-project/OpenSearch/pull/9822)) - Bump `org.eclipse.jgit` from 6.5.0 to 6.7.0 ([#10147](https://github.com/opensearch-project/OpenSearch/pull/10147)) +- Bump `netty` from 4.1.99.Final to 4.1.100.Final ([#10564](https://github.com/opensearch-project/OpenSearch/pull/10564)) ### Changed - [CCR] Add getHistoryOperationsFromTranslog method to fetch the history snapshot from translogs ([#3948](https://github.com/opensearch-project/OpenSearch/pull/3948)) diff --git a/buildSrc/version.properties b/buildSrc/version.properties index e54a5a1089a93..64f9f9a8828ad 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -28,7 +28,7 @@ jakarta_annotation = 1.3.5 # when updating the JNA version, also update the version in buildSrc/build.gradle jna = 5.13.0 -netty = 4.1.99.Final +netty = 4.1.100.Final joda = 2.12.2 # client dependencies diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.100.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..aaf2e35302d77 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-buffer-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +39b05d2d4027971bf99111a9be1d7035a116bb55 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.99.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.99.Final.jar.sha1 deleted file mode 100644 index 5b393be40e945..0000000000000 --- a/modules/transport-netty4/licenses/netty-buffer-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9f02dcb9b15a647a56af210dffdc294a57922fb0 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.100.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..a77333ea8ae47 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +9c3c71e7cf3b8ce3bfc9fa52a524b9ca7ddf259c \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.99.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.99.Final.jar.sha1 deleted file mode 100644 index 45ea27d29a183..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9984cbd6e5d55c768f198e975d8aaf7fd42a4602 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.100.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..6f26bf4e6a9b5 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +992623e7d8f2d96e41faf1687bb963f5433e3517 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.99.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.99.Final.jar.sha1 deleted file mode 100644 index 6bb7fcd68b272..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http-4.1.99.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7142095066eaebd5f29b88c41af7b383b6a953f6 \ No newline at 
end of file
diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.100.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.100.Final.jar.sha1
new file mode 100644
index 0000000000000..bf5605151406e
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-codec-http2-4.1.100.Final.jar.sha1
@@ -0,0 +1 @@
+cbf1a430ea44dbdedbcde16b185cbb95f28d72c7
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.99.Final.jar.sha1
deleted file mode 100644
index f9bdefc6dd965..0000000000000
--- a/modules/transport-netty4/licenses/netty-codec-http2-4.1.99.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-c5a3481c4bb9732a3a94fb63cf916141a1a14669
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-common-4.1.100.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.100.Final.jar.sha1
new file mode 100644
index 0000000000000..d2ff72db60d1f
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-common-4.1.100.Final.jar.sha1
@@ -0,0 +1 @@
+847f942381145de23f21c836d05b0677474271d3
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-common-4.1.99.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.99.Final.jar.sha1
deleted file mode 100644
index d53adfa649f5f..0000000000000
--- a/modules/transport-netty4/licenses/netty-common-4.1.99.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-278f6dfa49d6bd75c40ae1470eb165716f87dce0
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.100.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.100.Final.jar.sha1
new file mode 100644
index 0000000000000..f12a6046e96d0
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-handler-4.1.100.Final.jar.sha1
@@ -0,0 +1 @@
+4c0acdb8bb73647ebb3847ac2d503d53d72c02b4
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.99.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.99.Final.jar.sha1
deleted file mode 100644
index 258f7c957dda0..0000000000000
--- a/modules/transport-netty4/licenses/netty-handler-4.1.99.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-742693761d7ea4c038bccfda96bb38194720b80d
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.100.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.100.Final.jar.sha1
new file mode 100644
index 0000000000000..8e4179ba15942
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-resolver-4.1.100.Final.jar.sha1
@@ -0,0 +1 @@
+fe62f9ccd41b8660d07639dbbab8ae1edd6f2720
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.99.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.99.Final.jar.sha1
deleted file mode 100644
index b8bc0a4370f58..0000000000000
--- a/modules/transport-netty4/licenses/netty-resolver-4.1.99.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-080e45397d9d5b134477de3ffd0f94283b908621
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.100.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.100.Final.jar.sha1
new file mode 100644
index 0000000000000..ab2819da570fd
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-transport-4.1.100.Final.jar.sha1
@@ -0,0 +1 @@
+6620fbfb47667a5eb6050e35c7b4c88000bcd77f
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.99.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.99.Final.jar.sha1
deleted file mode 100644
index 247975e0a64c7..0000000000000
--- a/modules/transport-netty4/licenses/netty-transport-4.1.99.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-9ca2e3ae19a6713b749df154622115f480b6716c
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1
new file mode 100644
index 0000000000000..30d7758302e37
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1
@@ -0,0 +1 @@
+648ff5571022dbfa6789122e3872477bbf67fa7b
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1
deleted file mode 100644
index 6b7b66ea768e3..0000000000000
--- a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-cb0fc6c31c387404212949c57950b5d72ce908b9
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.100.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.100.Final.jar.sha1
new file mode 100644
index 0000000000000..dfa4a0fbea94c
--- /dev/null
+++ b/plugins/repository-azure/licenses/netty-codec-dns-4.1.100.Final.jar.sha1
@@ -0,0 +1 @@
+663b1b7bf3ff0f12fde4df20c72d9e94584ebffa
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.99.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.99.Final.jar.sha1
deleted file mode 100644
index 6c1112ed49775..0000000000000
--- a/plugins/repository-azure/licenses/netty-codec-dns-4.1.99.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-21c76a42a468faafac6c84f8aca775073fc8e345
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.100.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.100.Final.jar.sha1
new file mode 100644
index 0000000000000..bf5605151406e
--- /dev/null
+++ b/plugins/repository-azure/licenses/netty-codec-http2-4.1.100.Final.jar.sha1
@@ -0,0 +1 @@
+cbf1a430ea44dbdedbcde16b185cbb95f28d72c7
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.99.Final.jar.sha1
deleted file mode 100644
index f9bdefc6dd965..0000000000000
--- a/plugins/repository-azure/licenses/netty-codec-http2-4.1.99.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-c5a3481c4bb9732a3a94fb63cf916141a1a14669
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.100.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.100.Final.jar.sha1
new file mode 100644
index 0000000000000..8e9bc8c96aec7
--- /dev/null
+++ b/plugins/repository-azure/licenses/netty-codec-socks-4.1.100.Final.jar.sha1
@@ -0,0 +1 @@
+a9fbf4d64b08abed542eefd5f7aed4807edca56f
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.99.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.99.Final.jar.sha1
deleted file mode 100644
index 717703c36e1ab..0000000000000
--- a/plugins/repository-azure/licenses/netty-codec-socks-4.1.99.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-259bf1c5178c3e23bb89a2fab59b6d22846e3fa6
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.100.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.100.Final.jar.sha1
new file mode 100644
index 0000000000000..35d9d82202274
--- /dev/null
+++ b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.100.Final.jar.sha1
@@ -0,0 +1 @@
+af3cf676eed30184215426ecf0f0dde15555ea9c
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.99.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.99.Final.jar.sha1
deleted file mode 100644
index 3f69ae54c5d4a..0000000000000
--- a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.99.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-8c8a89ea89b06e120c57bdb3db14b9a47ca30bb3
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.100.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.100.Final.jar.sha1
new file mode 100644
index 0000000000000..0948daa05fff6
--- /dev/null
+++ b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.100.Final.jar.sha1
@@ -0,0 +1 @@
+62dbdf5f25eda75ea8456be1ed72b3fcb0d18774
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.99.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.99.Final.jar.sha1
deleted file mode 100644
index adef44a4e7da7..0000000000000
--- a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.99.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-804d8b752847923d3bb81f24de604597047c9b2e
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1
new file mode 100644
index 0000000000000..30d7758302e37
--- /dev/null
+++ b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1
@@ -0,0 +1 @@
+648ff5571022dbfa6789122e3872477bbf67fa7b
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1
deleted file mode 100644
index 6b7b66ea768e3..0000000000000
--- a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-cb0fc6c31c387404212949c57950b5d72ce908b9
\ No newline at end of file
diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.100.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.100.Final.jar.sha1
new file mode 100644
index 0000000000000..a9aa34392903e
--- /dev/null
+++ b/plugins/repository-hdfs/licenses/netty-all-4.1.100.Final.jar.sha1
@@ -0,0 +1 @@
+5ef15a3ce29a792b7ad17438e5f84c617b3f2993
\ No newline at end of file
diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.99.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.99.Final.jar.sha1
deleted file mode 100644
index 0756635018837..0000000000000
--- a/plugins/repository-hdfs/licenses/netty-all-4.1.99.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-a45aa70bc50d0500da5cdcd595cc838d87ada987
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.100.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.100.Final.jar.sha1
new file mode 100644
index 0000000000000..aaf2e35302d77
--- /dev/null
+++ b/plugins/repository-s3/licenses/netty-buffer-4.1.100.Final.jar.sha1
@@ -0,0 +1 @@
+39b05d2d4027971bf99111a9be1d7035a116bb55
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.99.Final.jar.sha1
deleted file mode 100644
index 5b393be40e945..0000000000000
--- a/plugins/repository-s3/licenses/netty-buffer-4.1.99.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-9f02dcb9b15a647a56af210dffdc294a57922fb0
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.100.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.100.Final.jar.sha1
new file mode 100644
index 0000000000000..a77333ea8ae47
--- /dev/null
+++ b/plugins/repository-s3/licenses/netty-codec-4.1.100.Final.jar.sha1
@@ -0,0 +1 @@
+9c3c71e7cf3b8ce3bfc9fa52a524b9ca7ddf259c
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.99.Final.jar.sha1
deleted file mode 100644
index 45ea27d29a183..0000000000000
--- a/plugins/repository-s3/licenses/netty-codec-4.1.99.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-9984cbd6e5d55c768f198e975d8aaf7fd42a4602
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.100.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.100.Final.jar.sha1
new file mode 100644
index 0000000000000..6f26bf4e6a9b5
--- /dev/null
+++ b/plugins/repository-s3/licenses/netty-codec-http-4.1.100.Final.jar.sha1
@@ -0,0 +1 @@
+992623e7d8f2d96e41faf1687bb963f5433e3517
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.99.Final.jar.sha1
deleted file mode 100644
index 6bb7fcd68b272..0000000000000
--- a/plugins/repository-s3/licenses/netty-codec-http-4.1.99.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-7142095066eaebd5f29b88c41af7b383b6a953f6
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.100.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.100.Final.jar.sha1
new file mode 100644
index 0000000000000..bf5605151406e
--- /dev/null
+++ b/plugins/repository-s3/licenses/netty-codec-http2-4.1.100.Final.jar.sha1
@@ -0,0 +1 @@
+cbf1a430ea44dbdedbcde16b185cbb95f28d72c7
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.99.Final.jar.sha1
deleted file mode 100644
index f9bdefc6dd965..0000000000000
--- a/plugins/repository-s3/licenses/netty-codec-http2-4.1.99.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-c5a3481c4bb9732a3a94fb63cf916141a1a14669
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-common-4.1.100.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.100.Final.jar.sha1
new file mode 100644
index 0000000000000..d2ff72db60d1f
--- /dev/null
+++ b/plugins/repository-s3/licenses/netty-common-4.1.100.Final.jar.sha1
@@ -0,0 +1 @@
+847f942381145de23f21c836d05b0677474271d3
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-common-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.99.Final.jar.sha1
deleted file mode 100644
index d53adfa649f5f..0000000000000
--- a/plugins/repository-s3/licenses/netty-common-4.1.99.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-278f6dfa49d6bd75c40ae1470eb165716f87dce0
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.100.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.100.Final.jar.sha1
new file mode 100644
index 0000000000000..f12a6046e96d0
--- /dev/null
+++ b/plugins/repository-s3/licenses/netty-handler-4.1.100.Final.jar.sha1
@@ -0,0 +1 @@
+4c0acdb8bb73647ebb3847ac2d503d53d72c02b4
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.99.Final.jar.sha1
deleted file mode 100644
index 258f7c957dda0..0000000000000
--- a/plugins/repository-s3/licenses/netty-handler-4.1.99.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-742693761d7ea4c038bccfda96bb38194720b80d
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.100.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.100.Final.jar.sha1
new file mode 100644
index 0000000000000..8e4179ba15942
--- /dev/null
+++ b/plugins/repository-s3/licenses/netty-resolver-4.1.100.Final.jar.sha1
@@ -0,0 +1 @@
+fe62f9ccd41b8660d07639dbbab8ae1edd6f2720
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.99.Final.jar.sha1
deleted file mode 100644
index b8bc0a4370f58..0000000000000
--- a/plugins/repository-s3/licenses/netty-resolver-4.1.99.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-080e45397d9d5b134477de3ffd0f94283b908621
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.100.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.100.Final.jar.sha1
new file mode 100644
index 0000000000000..ab2819da570fd
--- /dev/null
+++ b/plugins/repository-s3/licenses/netty-transport-4.1.100.Final.jar.sha1
@@ -0,0 +1 @@
+6620fbfb47667a5eb6050e35c7b4c88000bcd77f
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.99.Final.jar.sha1
deleted file mode 100644
index 247975e0a64c7..0000000000000
--- a/plugins/repository-s3/licenses/netty-transport-4.1.99.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-9ca2e3ae19a6713b749df154622115f480b6716c
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.100.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.100.Final.jar.sha1
new file mode 100644
index 0000000000000..5805fdaf411d1
--- /dev/null
+++ b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.100.Final.jar.sha1
@@ -0,0 +1 @@
+78489936ca1d91483e34a31d04a3b0812386eb39
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.99.Final.jar.sha1
deleted file mode 100644
index 75b64ad4197d8..0000000000000
--- a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.99.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-961bd5b8d97ea6a07168176462f398089a24b5c8
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1
new file mode 100644
index 0000000000000..30d7758302e37
--- /dev/null
+++ b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1
@@ -0,0 +1 @@
+648ff5571022dbfa6789122e3872477bbf67fa7b
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1
deleted file mode 100644
index 6b7b66ea768e3..0000000000000
--- a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-cb0fc6c31c387404212949c57950b5d72ce908b9
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.100.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.100.Final.jar.sha1
new file mode 100644
index 0000000000000..aaf2e35302d77
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-buffer-4.1.100.Final.jar.sha1
@@ -0,0 +1 @@
+39b05d2d4027971bf99111a9be1d7035a116bb55
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.99.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.99.Final.jar.sha1
deleted file mode 100644
index 5b393be40e945..0000000000000
--- a/plugins/transport-nio/licenses/netty-buffer-4.1.99.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-9f02dcb9b15a647a56af210dffdc294a57922fb0
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.100.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.100.Final.jar.sha1
new file mode 100644
index 0000000000000..a77333ea8ae47
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-codec-4.1.100.Final.jar.sha1
@@ -0,0 +1 @@
+9c3c71e7cf3b8ce3bfc9fa52a524b9ca7ddf259c
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.99.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.99.Final.jar.sha1
deleted file mode 100644
index 45ea27d29a183..0000000000000
--- a/plugins/transport-nio/licenses/netty-codec-4.1.99.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-9984cbd6e5d55c768f198e975d8aaf7fd42a4602
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.100.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.100.Final.jar.sha1
new file mode 100644
index 0000000000000..6f26bf4e6a9b5
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-codec-http-4.1.100.Final.jar.sha1
@@ -0,0 +1 @@
+992623e7d8f2d96e41faf1687bb963f5433e3517
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.99.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.99.Final.jar.sha1
deleted file mode 100644
index 6bb7fcd68b272..0000000000000
--- a/plugins/transport-nio/licenses/netty-codec-http-4.1.99.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-7142095066eaebd5f29b88c41af7b383b6a953f6
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-common-4.1.100.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.100.Final.jar.sha1
new file mode 100644
index 0000000000000..d2ff72db60d1f
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-common-4.1.100.Final.jar.sha1
@@ -0,0 +1 @@
+847f942381145de23f21c836d05b0677474271d3
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-common-4.1.99.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.99.Final.jar.sha1
deleted file mode 100644
index d53adfa649f5f..0000000000000
--- a/plugins/transport-nio/licenses/netty-common-4.1.99.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-278f6dfa49d6bd75c40ae1470eb165716f87dce0
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.100.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.100.Final.jar.sha1
new file mode 100644
index 0000000000000..f12a6046e96d0
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-handler-4.1.100.Final.jar.sha1
@@ -0,0 +1 @@
+4c0acdb8bb73647ebb3847ac2d503d53d72c02b4
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.99.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.99.Final.jar.sha1
deleted file mode 100644
index 258f7c957dda0..0000000000000
--- a/plugins/transport-nio/licenses/netty-handler-4.1.99.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-742693761d7ea4c038bccfda96bb38194720b80d
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.100.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.100.Final.jar.sha1
new file mode 100644
index 0000000000000..8e4179ba15942
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-resolver-4.1.100.Final.jar.sha1
@@ -0,0 +1 @@
+fe62f9ccd41b8660d07639dbbab8ae1edd6f2720
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.99.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.99.Final.jar.sha1
deleted file mode 100644
index b8bc0a4370f58..0000000000000
--- a/plugins/transport-nio/licenses/netty-resolver-4.1.99.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-080e45397d9d5b134477de3ffd0f94283b908621
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.100.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.100.Final.jar.sha1
new file mode 100644
index 0000000000000..ab2819da570fd
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-transport-4.1.100.Final.jar.sha1
@@ -0,0 +1 @@
+6620fbfb47667a5eb6050e35c7b4c88000bcd77f
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.99.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.99.Final.jar.sha1
deleted file mode 100644
index 247975e0a64c7..0000000000000
--- a/plugins/transport-nio/licenses/netty-transport-4.1.99.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-9ca2e3ae19a6713b749df154622115f480b6716c
\ No newline at end of file
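Each `*.jar.sha1` file above pins the SHA-1 digest of one Netty artifact; the build's dependency-license check compares the pinned value against the resolved jar, so a version bump always pairs a new digest file with the deletion of the old one. A minimal sketch of how such a pin can be computed, assuming a locally downloaded jar (the path and class name here are illustrative, not part of the build):

```java
import java.nio.file.Files;
import java.nio.file.Path;
import java.security.MessageDigest;

// Sketch: compute the hex SHA-1 digest that a *.jar.sha1 file pins.
public final class Sha1Pin {
    public static void main(String[] args) throws Exception {
        Path jar = Path.of("netty-common-4.1.100.Final.jar"); // hypothetical local copy
        byte[] digest = MessageDigest.getInstance("SHA-1").digest(Files.readAllBytes(jar));
        StringBuilder hex = new StringBuilder();
        for (byte b : digest) {
            hex.append(String.format("%02x", b)); // lowercase hex, as in the checked-in files
        }
        System.out.println(hex); // expected to match the committed .sha1 content
    }
}
```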
From f518e9dfd518e1ad31d62ff028b61ab91222204d Mon Sep 17 00:00:00 2001
From: Andriy Redko
Date: Wed, 11 Oct 2023 21:17:10 -0400
Subject: [PATCH 19/33] Update OpenSearch 2.12 to use Apache Lucene 9.8.0 (#10578)

Signed-off-by: Andriy Redko
---
 libs/core/src/main/java/org/opensearch/Version.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/libs/core/src/main/java/org/opensearch/Version.java b/libs/core/src/main/java/org/opensearch/Version.java
index 3c210e5c7cf96..eef4da719994c 100644
--- a/libs/core/src/main/java/org/opensearch/Version.java
+++ b/libs/core/src/main/java/org/opensearch/Version.java
@@ -96,7 +96,7 @@ public class Version implements Comparable<Version>, ToXContentFragment {
     public static final Version V_2_10_0 = new Version(2100099, org.apache.lucene.util.Version.LUCENE_9_7_0);
     public static final Version V_2_10_1 = new Version(2100199, org.apache.lucene.util.Version.LUCENE_9_7_0);
     public static final Version V_2_11_0 = new Version(2110099, org.apache.lucene.util.Version.LUCENE_9_7_0);
-    public static final Version V_2_12_0 = new Version(2120099, org.apache.lucene.util.Version.LUCENE_9_7_0);
+    public static final Version V_2_12_0 = new Version(2120099, org.apache.lucene.util.Version.LUCENE_9_8_0);
     public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_8_0);
     public static final Version CURRENT = V_3_0_0;
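The first constructor argument packs the release coordinates into one integer: judging from the neighboring constants (2100099 for 2.10.0, 2100199 for 2.10.1, 2110099 for 2.11.0), two digits each encode the minor, revision, and a trailing build component of 99. A small sketch of that decoding, inferred from the constants rather than taken from the `Version` class itself:

```java
// Decode an OpenSearch-style version id such as 2120099 (-> 2.12.0, build 99).
// This layout is inferred from the constants above; the real class has its own parsing.
public final class VersionId {
    public static String decode(int id) {
        int major = id / 1_000_000;
        int minor = (id / 10_000) % 100;
        int revision = (id / 100) % 100;
        int build = id % 100;
        return major + "." + minor + "." + revision + " (build " + build + ")";
    }

    public static void main(String[] args) {
        System.out.println(decode(2_120_099)); // prints: 2.12.0 (build 99)
    }
}
```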
From 90c4297266495de41c3b09728ce82d1a9aec1d64 Mon Sep 17 00:00:00 2001
From: Ashish
Date: Thu, 12 Oct 2023 10:26:34 +0530
Subject: [PATCH 20/33] Fix shard failure on flush during upload failures for remote indexes (#10513)

Signed-off-by: Ashish Singh
---
 ...emoteStoreBackpressureAndResiliencyIT.java | 29 +++++++++++++++++++
 .../opensearch/index/shard/IndexShard.java    |  2 +-
 .../translog/InternalTranslogManager.java     |  6 +++-
 .../index/translog/TranslogManager.java       |  2 +-
 .../transfer/TranslogTransferManager.java     |  4 +--
 .../TranslogUploadFailedException.java        | 27 +++++++++++++++++
 .../index/engine/InternalEngineTests.java     |  6 +++-
 .../index/translog/RemoteFsTranslogTests.java |  3 +-
 8 files changed, 72 insertions(+), 7 deletions(-)
 create mode 100644 server/src/main/java/org/opensearch/index/translog/transfer/TranslogUploadFailedException.java

diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureAndResiliencyIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureAndResiliencyIT.java
index 2c6db6ae19a9a..98586b60dcc69 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureAndResiliencyIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureAndResiliencyIT.java
@@ -11,6 +11,7 @@
 import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStats;
 import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsResponse;
 import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse;
+import org.opensearch.action.admin.indices.flush.FlushResponse;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.unit.TimeValue;
 import org.opensearch.common.util.concurrent.AbstractAsyncTask;
@@ -228,4 +229,32 @@ public void testSkipLoadGlobalCheckpointToReplicationTracker() {
         client().admin().cluster().prepareReroute().setRetryFailed(true).get();
         ensureGreen(INDEX_NAME);
     }
+
+    public void testFlushDuringRemoteUploadFailures() {
+        Path location = randomRepoPath().toAbsolutePath();
+        String dataNodeName = setup(location, 0d, "metadata", Long.MAX_VALUE);
+
+        logger.info("--> Indexing data");
+        indexData(randomIntBetween(1, 2), true);
+        logger.info("--> Indexing succeeded");
+        ensureGreen(INDEX_NAME);
+
+        MockRepository translogRepo = (MockRepository) internalCluster().getInstance(RepositoriesService.class, dataNodeName)
+            .repository(TRANSLOG_REPOSITORY_NAME);
+        logger.info("--> Failing all remote store interaction");
+        translogRepo.setRandomControlIOExceptionRate(1d);
+
+        Exception ex = assertThrows(UncategorizedExecutionException.class, () -> indexSingleDoc());
+        assertEquals("Failed execution", ex.getMessage());
+
+        FlushResponse flushResponse = client().admin().indices().prepareFlush(INDEX_NAME).setForce(true).execute().actionGet();
+        assertEquals(1, flushResponse.getFailedShards());
+        ensureGreen(INDEX_NAME);
+
+        logger.info("--> Stop failing all remote store interactions");
+        translogRepo.setRandomControlIOExceptionRate(0d);
+        flushResponse = client().admin().indices().prepareFlush(INDEX_NAME).setForce(true).execute().actionGet();
+        assertEquals(1, flushResponse.getSuccessfulShards());
+        assertEquals(0, flushResponse.getFailedShards());
+    }
 }
diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java
index 6ac75c7bfec17..9489c7d7fc1dd 100644
--- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java
+++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java
@@ -1475,7 +1475,7 @@ public void trimTranslog() {
     /**
      * Rolls the tranlog generation and cleans unneeded.
      */
-    public void rollTranslogGeneration() {
+    public void rollTranslogGeneration() throws IOException {
         final Engine engine = getEngine();
         engine.translogManager().rollTranslogGeneration();
     }
diff --git a/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java b/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java
index 4bc9a711894b7..85c52b907d326 100644
--- a/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java
+++ b/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java
@@ -18,6 +18,7 @@
 import org.opensearch.index.engine.LifecycleAware;
 import org.opensearch.index.seqno.LocalCheckpointTracker;
 import org.opensearch.index.translog.listener.TranslogEventListener;
+import org.opensearch.index.translog.transfer.TranslogUploadFailedException;

 import java.io.Closeable;
 import java.io.IOException;
@@ -83,11 +84,14 @@ public InternalTranslogManager(
      * Rolls the translog generation and cleans unneeded.
      */
     @Override
-    public void rollTranslogGeneration() throws TranslogException {
+    public void rollTranslogGeneration() throws TranslogException, IOException {
         try (ReleasableLock ignored = readLock.acquire()) {
             engineLifeCycleAware.ensureOpen();
             translog.rollGeneration();
             translog.trimUnreferencedReaders();
+        } catch (TranslogUploadFailedException e) {
+            // Do not trigger the translogEventListener as it fails the Engine while this is only an issue with remote upload
+            throw e;
         } catch (AlreadyClosedException e) {
             translogEventListener.onFailure("translog roll generation failed", e);
             throw e;
diff --git a/server/src/main/java/org/opensearch/index/translog/TranslogManager.java b/server/src/main/java/org/opensearch/index/translog/TranslogManager.java
index 78aaa1bc13a00..148fd67fb413e 100644
--- a/server/src/main/java/org/opensearch/index/translog/TranslogManager.java
+++ b/server/src/main/java/org/opensearch/index/translog/TranslogManager.java
@@ -21,7 +21,7 @@ public interface TranslogManager {
     /**
     * Rolls the translog generation and cleans unneeded.
     */
-    void rollTranslogGeneration() throws TranslogException;
+    void rollTranslogGeneration() throws TranslogException, IOException;

    /**
     * Performs recovery from the transaction log up to {@code recoverUpToSeqNo} (inclusive).
diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java
index d988b8a6254ff..ece6f6d5a534f 100644
--- a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java
+++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java
@@ -176,7 +176,7 @@ public boolean transferSnapshot(TransferSnapshot transferSnapshot, TranslogTrans
                 remoteTranslogTransferTracker.addUploadTimeInMillis((System.nanoTime() - metadataUploadStartTime) / 1_000_000L);
                 remoteTranslogTransferTracker.addUploadBytesFailed(metadataBytesToUpload);
                 // outer catch handles capturing stats on upload failure
-                throw exception;
+                throw new TranslogUploadFailedException("Failed to upload " + tlogMetadata.getName(), exception);
             }

             remoteTranslogTransferTracker.addUploadTimeInMillis((System.nanoTime() - metadataUploadStartTime) / 1_000_000L);
@@ -185,7 +185,7 @@ public boolean transferSnapshot(TransferSnapshot transferSnapshot, TranslogTrans
             translogTransferListener.onUploadComplete(transferSnapshot);
             return true;
         } else {
-            Exception ex = new IOException("Failed to upload " + exceptionList.size() + " files during transfer");
+            Exception ex = new TranslogUploadFailedException("Failed to upload " + exceptionList.size() + " files during transfer");
             exceptionList.forEach(ex::addSuppressed);
             throw ex;
         }
diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogUploadFailedException.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogUploadFailedException.java
new file mode 100644
index 0000000000000..4a9b10ec5a52e
--- /dev/null
+++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogUploadFailedException.java
@@ -0,0 +1,27 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index.translog.transfer;
+
+import java.io.IOException;
+
+/**
+ * Exception is thrown if there are any exceptions while uploading translog to remote store.
+ * @opensearch.internal
+ */
+public class TranslogUploadFailedException extends IOException {
+
+    public TranslogUploadFailedException(String message) {
+        super(message);
+    }
+
+    public TranslogUploadFailedException(String message, Throwable cause) {
+        super(message, cause);
+    }
+
+}
diff --git a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java
index b8bb73bb89a82..305c3a3acbf75 100644
--- a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java
+++ b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java
@@ -7256,7 +7256,11 @@ public void testMaxSeqNoInCommitUserData() throws Exception {
             engine.ensureOpen();
             while (running.get()
                 && assertAndGetInternalTranslogManager(engine.translogManager()).getTranslog().currentFileGeneration() < 500) {
-                engine.translogManager().rollTranslogGeneration(); // make adding operations to translog slower
+                try {
+                    engine.translogManager().rollTranslogGeneration(); // make adding operations to translog slower
+                } catch (IOException e) {
+                    fail("io exception not expected");
+                }
             }
         });
         rollTranslog.start();
diff --git a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java
index b2310010620f7..42e0df2dc90c1 100644
--- a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java
+++ b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java
@@ -47,6 +47,7 @@
 import org.opensearch.index.translog.transfer.BlobStoreTransferService;
 import org.opensearch.index.translog.transfer.TranslogTransferManager;
 import org.opensearch.index.translog.transfer.TranslogTransferMetadata;
+import org.opensearch.index.translog.transfer.TranslogUploadFailedException;
 import org.opensearch.indices.recovery.RecoverySettings;
 import org.opensearch.indices.replication.common.ReplicationType;
 import org.opensearch.repositories.blobstore.BlobStoreRepository;
@@ -1113,7 +1114,7 @@ public void testSyncUpAlwaysFailure() throws IOException {
             try {
                 translog.sync();
                 fail("io exception expected");
-            } catch (IOException e) {
+            } catch (TranslogUploadFailedException e) {
                 assertTrue("at least one operation pending", translog.syncNeeded());
             }
         }
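The patch hinges on `TranslogUploadFailedException` being a subclass of `IOException`: a more specific catch clause runs before the generic one, so a remote upload failure can fail only the in-flight flush instead of triggering the listener that fails the whole engine. A self-contained sketch of that catch ordering, using a local stand-in class rather than the OpenSearch types:

```java
import java.io.IOException;

// Standalone illustration: the more specific catch runs first, so an upload
// failure can be treated as a retryable request failure rather than reaching
// the generic handler that would fail the engine.
public final class CatchOrdering {
    static class TranslogUploadFailedException extends IOException { // local stand-in
        TranslogUploadFailedException(String message) {
            super(message);
        }
    }

    static void rollGeneration(boolean failUpload) throws IOException {
        if (failUpload) {
            throw new TranslogUploadFailedException("Failed to upload 1 files during transfer");
        }
    }

    public static void main(String[] args) {
        try {
            rollGeneration(true);
        } catch (TranslogUploadFailedException e) {
            System.out.println("retryable flush failure: " + e.getMessage()); // engine stays alive
        } catch (IOException e) {
            System.out.println("unexpected I/O failure, would fail the engine");
        }
    }
}
```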
From 9c06228f2e39b3a7b46e9eb26aa2368dcbf4116a Mon Sep 17 00:00:00 2001
From: Varun Bansal
Date: Thu, 12 Oct 2023 16:09:11 +0530
Subject: [PATCH 21/33] Fix system_repository setting is restored after cluster metadata recovery from local disk (#10556)

Signed-off-by: bansvaru
---
 .../RemoteStoreRepositoryRegistrationIT.java  | 19 +++++++++++++++++++
 .../get/GetRepositoriesResponse.java          |  7 +++++--
 .../metadata/RepositoriesMetadata.java        |  3 ++-
 3 files changed, 26 insertions(+), 3 deletions(-)

diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRepositoryRegistrationIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRepositoryRegistrationIT.java
index 002a149f0c286..ef2dcf3217df6 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRepositoryRegistrationIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRepositoryRegistrationIT.java
@@ -8,21 +8,31 @@

 package org.opensearch.remotestore;

+import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesAction;
+import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesRequest;
+import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesResponse;
 import org.opensearch.client.Client;
 import org.opensearch.cluster.metadata.RepositoryMetadata;
 import org.opensearch.common.settings.Settings;
+import org.opensearch.common.xcontent.XContentType;
 import org.opensearch.core.common.unit.ByteSizeValue;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
+import org.opensearch.core.xcontent.ToXContent;
+import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.plugins.Plugin;
 import org.opensearch.test.OpenSearchIntegTestCase;
 import org.opensearch.test.disruption.NetworkDisruption;
 import org.opensearch.test.transport.MockTransportService;

+import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashSet;
 import java.util.Set;
 import java.util.stream.Collectors;

+import static org.opensearch.repositories.blobstore.BlobStoreRepository.SYSTEM_REPOSITORY_SETTING;
+
 @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0)
 public class RemoteStoreRepositoryRegistrationIT extends RemoteStoreBaseIntegTestCase {
@@ -162,4 +172,13 @@ public void testNodeRestartPostNonRestrictedSettingsUpdate() throws Exception {
         ensureStableCluster(4);
     }
+
+    public void testSystemRepositorySettingIsHiddenForGetRepositoriesRequest() throws IOException {
+        GetRepositoriesRequest request = new GetRepositoriesRequest(new String[] { REPOSITORY_NAME });
+        GetRepositoriesResponse repositoriesResponse = client().execute(GetRepositoriesAction.INSTANCE, request).actionGet();
+        XContentBuilder builder = MediaTypeRegistry.contentBuilder(randomFrom(XContentType.JSON));
+        XContentBuilder xContentBuilder = repositoriesResponse.toXContent(builder, ToXContent.EMPTY_PARAMS);
+        repositoriesResponse = GetRepositoriesResponse.fromXContent(createParser(xContentBuilder));
+        assertEquals(false, SYSTEM_REPOSITORY_SETTING.get(repositoriesResponse.repositories().get(0).settings()));
+    }
 }
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java
index 098a0e60142e7..f8c8df25be532 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java
@@ -42,8 +42,8 @@
 import org.opensearch.core.xcontent.XContentParser;

 import java.io.IOException;
-import java.util.Collections;
 import java.util.List;
+import java.util.Map;

 import static org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken;

@@ -83,7 +83,10 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
         builder.startObject();
         repositories.toXContent(
             builder,
-            new DelegatingMapParams(Collections.singletonMap(RepositoriesMetadata.HIDE_GENERATIONS_PARAM, "true"), params)
+            new DelegatingMapParams(
+                Map.of(RepositoriesMetadata.HIDE_GENERATIONS_PARAM, "true", RepositoriesMetadata.HIDE_SYSTEM_REPOSITORY_SETTING, "true"),
+                params
+            )
         );
         builder.endObject();
         return builder;
diff --git a/server/src/main/java/org/opensearch/cluster/metadata/RepositoriesMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/RepositoriesMetadata.java
index a5ef337c3b62a..e3689d046193c 100644
--- a/server/src/main/java/org/opensearch/cluster/metadata/RepositoriesMetadata.java
+++ b/server/src/main/java/org/opensearch/cluster/metadata/RepositoriesMetadata.java
@@ -70,6 +70,7 @@ public class RepositoriesMetadata extends AbstractNamedDiffable<Custom> implemen
      * in {@link org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesResponse}.
      */
     public static final String HIDE_GENERATIONS_PARAM = "hide_generations";
+    public static final String HIDE_SYSTEM_REPOSITORY_SETTING = "hide_system_repository_setting";

     private final List<RepositoryMetadata> repositories;

@@ -291,7 +292,7 @@ public static void toXContent(RepositoryMetadata repository, XContentBuilder bui
             repository.cryptoMetadata().toXContent(repository.cryptoMetadata(), builder, params);
         }
         Settings settings = repository.settings();
-        if (SYSTEM_REPOSITORY_SETTING.get(settings)) {
+        if (SYSTEM_REPOSITORY_SETTING.get(settings) && params.paramAsBoolean(HIDE_SYSTEM_REPOSITORY_SETTING, false)) {
             settings = repository.settings().filter(s -> !s.equals(SYSTEM_REPOSITORY_SETTING.getKey()));
         }
         builder.startObject("settings");
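The fix works by making the filtering opt-in per serialization: `GetRepositoriesResponse` sets `hide_system_repository_setting=true` in the `ToXContent` params, so the setting is stripped only from the REST response, while cluster-state persistence, which serializes without that param, keeps the setting and can restore it after recovery. A reduced model of the pattern, with plain maps standing in for the `Params` machinery:

```java
import java.util.HashMap;
import java.util.Map;

// Reduced model of params-gated serialization: the same metadata renders
// differently depending on a per-call flag, so a REST response can hide
// system_repository while persisted cluster state keeps it.
public final class ParamsGatedSerialization {
    static final String HIDE = "hide_system_repository_setting";

    static Map<String, String> render(Map<String, String> settings, Map<String, String> params) {
        Map<String, String> rendered = new HashMap<>(settings);
        if ("true".equals(settings.get("system_repository")) && Boolean.parseBoolean(params.getOrDefault(HIDE, "false"))) {
            rendered.remove("system_repository"); // hidden only when the caller asks
        }
        return rendered;
    }

    public static void main(String[] args) {
        Map<String, String> settings = Map.of("location", "/backups", "system_repository", "true");
        System.out.println(render(settings, Map.of(HIDE, "true"))); // REST response: setting hidden
        System.out.println(render(settings, Map.of()));             // persistence: setting kept
    }
}
```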
From 6c022612769e6bc7ed18b9ec0888d6cd0dd415cc Mon Sep 17 00:00:00 2001
From: Sachin Kale
Date: Thu, 12 Oct 2023 16:11:56 +0530
Subject: [PATCH 22/33] Validate checksum of each segment file post download from remote store (#10119)

---------

Signed-off-by: Sachin Kale
Co-authored-by: Sachin Kale
---
 .../remotestore/RemoteStoreRestoreIT.java     | 33 ++++++++++++-
 .../store/RemoteSegmentStoreDirectory.java    |  4 ++
 .../org/opensearch/index/store/Store.java     | 47 ++++++++++++++++++-
 3 files changed, 81 insertions(+), 3 deletions(-)

diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java
index 7626e3dba6424..212f797180077 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java
@@ -10,8 +10,11 @@

 import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest;
 import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreResponse;
+import org.opensearch.action.admin.indices.get.GetIndexRequest;
+import org.opensearch.action.admin.indices.get.GetIndexResponse;
 import org.opensearch.action.support.PlainActionFuture;
 import org.opensearch.cluster.health.ClusterHealthStatus;
+import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.cluster.node.DiscoveryNode;
 import org.opensearch.cluster.service.ClusterService;
 import org.opensearch.common.settings.Settings;
@@ -19,10 +22,12 @@
 import org.opensearch.core.common.unit.ByteSizeUnit;
 import org.opensearch.repositories.RepositoriesService;
 import org.opensearch.repositories.Repository;
+import org.opensearch.test.CorruptionUtils;
 import org.opensearch.test.InternalTestCluster;
 import org.opensearch.test.OpenSearchIntegTestCase;

 import java.io.IOException;
+import java.nio.file.Files;
 import java.nio.file.Path;
 import java.util.HashMap;
 import java.util.Locale;
@@ -30,13 +35,14 @@
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
 import java.util.stream.Collectors;
+import java.util.stream.Stream;

 import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX;
 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount;
 import static org.hamcrest.Matchers.greaterThan;

-@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE, numDataNodes = 0)
+@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0)
 public class RemoteStoreRestoreIT extends BaseRemoteStoreRestoreIT {

     /**
@@ -461,5 +467,30 @@ public void testRateLimitedRemoteDownloads() throws Exception {
         }
     }

+    public void testRestoreCorruptSegmentShouldFail() throws IOException, ExecutionException, InterruptedException {
+        prepareCluster(1, 3, INDEX_NAME, 0, 1);
+        indexData(randomIntBetween(3, 4), true, INDEX_NAME);
+
+        GetIndexResponse getIndexResponse = client().admin().indices().getIndex(new GetIndexRequest()).get();
+        String indexUUID = getIndexResponse.getSettings().get(INDEX_NAME).get(IndexMetadata.SETTING_INDEX_UUID);
+
+        logger.info("--> Corrupting segment files in remote segment store");
+        Path path = segmentRepoPath.resolve(indexUUID).resolve("0").resolve("segments").resolve("data");
+        try (Stream<Path> dataPath = Files.list(path)) {
+            CorruptionUtils.corruptFile(random(), dataPath.toArray(Path[]::new));
+        }
+
+        logger.info("--> Stop primary");
+        internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNodeName(INDEX_NAME)));
+
+        logger.info("--> Close and restore the index");
+        client().admin()
+            .cluster()
+            .restoreRemoteStore(new RestoreRemoteStoreRequest().indices(INDEX_NAME).waitForCompletion(true), PlainActionFuture.newFuture());
+
+        logger.info("--> Check for index status, should be red due to corruption");
+        ensureRed(INDEX_NAME);
+    }
+
     // TODO: Restore flow - index aliases
 }
diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java
index be1f2341236ab..6b43fed3d8930 100644
--- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java
+++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java
@@ -290,6 +290,10 @@ public void setWrittenByMajor(int writtenByMajor) {
                 );
             }
         }
+
+        public int getWrittenByMajor() {
+            return writtenByMajor;
+        }
     }

     /**
diff --git a/server/src/main/java/org/opensearch/index/store/Store.java b/server/src/main/java/org/opensearch/index/store/Store.java
index b822742de6e97..d0cd2635ba672 100644
--- a/server/src/main/java/org/opensearch/index/store/Store.java
+++ b/server/src/main/java/org/opensearch/index/store/Store.java
@@ -105,6 +105,7 @@
 import java.io.UncheckedIOException;
 import java.nio.file.NoSuchFileException;
 import java.nio.file.Path;
+import java.text.ParseException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
@@ -120,6 +121,7 @@
 import java.util.zip.CRC32;
 import java.util.zip.Checksum;

+import static java.lang.Character.MAX_RADIX;
 import static java.util.Collections.emptyMap;
 import static java.util.Collections.unmodifiableMap;
 import static org.opensearch.index.seqno.SequenceNumbers.LOCAL_CHECKPOINT_KEY;
@@ -975,7 +977,11 @@ public void copyFrom(Directory from, String src, String dest, IOContext context)
             boolean success = false;
             long startTime = System.currentTimeMillis();
             try {
-                super.copyFrom(from, src, dest, context);
+                if (from instanceof RemoteSegmentStoreDirectory) {
+                    copyFileAndValidateChecksum(from, src, dest, context, fileSize);
+                } else {
+                    super.copyFrom(from, src, dest, context);
+                }
                 success = true;
                 afterDownload(fileSize, startTime);
             } finally {
@@ -985,6 +991,43 @@ public void copyFrom(Directory from, String src, String dest, IOContext context)
             }
         }

+        private void copyFileAndValidateChecksum(Directory from, String src, String dest, IOContext context, long fileSize)
+            throws IOException {
+            RemoteSegmentStoreDirectory.UploadedSegmentMetadata metadata = ((RemoteSegmentStoreDirectory) from)
+                .getSegmentsUploadedToRemoteStore()
+                .get(dest);
+            boolean success = false;
+            try (IndexInput is = from.openInput(src, context); IndexOutput os = createOutput(dest, context)) {
+                // Here, we don't need the exact version as LuceneVerifyingIndexOutput does not verify version
+                // It is just used to emit logs when the entire metadata object is provided as parameter. Also,
+                // we can't provide null version as StoreFileMetadata has non-null check on writtenBy field.
+                Version luceneMajorVersion = Version.parse(metadata.getWrittenByMajor() + ".0.0");
+                long checksum = Long.parseLong(metadata.getChecksum());
+                StoreFileMetadata storeFileMetadata = new StoreFileMetadata(
+                    dest,
+                    fileSize,
+                    Long.toString(checksum, MAX_RADIX),
+                    luceneMajorVersion
+                );
+                VerifyingIndexOutput verifyingIndexOutput = new LuceneVerifyingIndexOutput(storeFileMetadata, os);
+                verifyingIndexOutput.copyBytes(is, is.length());
+                verifyingIndexOutput.verify();
+                success = true;
+            } catch (ParseException e) {
+                throw new IOException("Exception while reading version info for segment file from remote store: " + dest, e);
+            } finally {
+                if (success == false) {
+                    // If the exception is thrown after file is created, we clean up the file.
+                    // We ignore the exception as the deletion is best-effort basis and can fail if file does not exist.
+                    try {
+                        deleteFile("Quietly deleting", dest);
+                    } catch (Exception e) {
+                        // Ignore
+                    }
+                }
+            }
+        }
+
         /**
          * Updates the amount of bytes attempted for download
          */
@@ -1476,7 +1519,7 @@ public static boolean isAutogenerated(String name) {
      * Produces a string representation of the given digest value.
      */
     public static String digestToString(long digest) {
-        return Long.toString(digest, Character.MAX_RADIX);
+        return Long.toString(digest, MAX_RADIX);
     }

     /**
Dependencies -- Bump `peter-evans/create-or-update-comment` from 2 to 3 ([#9575](https://github.com/opensearch-project/OpenSearch/pull/9575)) -- Bump `actions/checkout` from 2 to 4 ([#9968](https://github.com/opensearch-project/OpenSearch/pull/9968)) -- Bump OpenTelemetry from 1.26.0 to 1.30.1 ([#9950](https://github.com/opensearch-project/OpenSearch/pull/9950)) -- Bump `org.apache.commons:commons-compress` from 1.23.0 to 1.24.0 ([#9973, #9972](https://github.com/opensearch-project/OpenSearch/pull/9973, https://github.com/opensearch-project/OpenSearch/pull/9972)) -- Bump `com.google.cloud:google-cloud-core-http` from 2.21.1 to 2.23.0 ([#9971](https://github.com/opensearch-project/OpenSearch/pull/9971)) -- Bump `mockito` from 5.4.0 to 5.5.0 ([#10022](https://github.com/opensearch-project/OpenSearch/pull/10022)) -- Bump `bytebuddy` from 1.14.3 to 1.14.7 ([#10022](https://github.com/opensearch-project/OpenSearch/pull/10022)) -- Bump `com.zaxxer:SparseBitSet` from 1.2 to 1.3 ([#10098](https://github.com/opensearch-project/OpenSearch/pull/10098)) -- Bump `tibdex/github-app-token` from 1.5.0 to 2.1.0 ([#10125](https://github.com/opensearch-project/OpenSearch/pull/10125)) -- Bump `org.wiremock:wiremock-standalone` from 2.35.0 to 3.1.0 ([#9752](https://github.com/opensearch-project/OpenSearch/pull/9752)) -- Bump `com.google.http-client:google-http-client-jackson2` from 1.43.2 to 1.43.3 ([#10126](https://github.com/opensearch-project/OpenSearch/pull/10126)) -- Bump `org.xerial.snappy:snappy-java` from 1.1.10.3 to 1.1.10.5 ([#10206](https://github.com/opensearch-project/OpenSearch/pull/10206), [#10299](https://github.com/opensearch-project/OpenSearch/pull/10299)) -- Bump `com.google.api.grpc:proto-google-common-protos` from 2.10.0 to 2.25.1 ([#10208](https://github.com/opensearch-project/OpenSearch/pull/10208), [#10298](https://github.com/opensearch-project/OpenSearch/pull/10298)) -- Bump `codecov/codecov-action` from 2 to 3 ([#10209](https://github.com/opensearch-project/OpenSearch/pull/10209)) -- Bump `org.bouncycastle:bcpkix-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` -- Bump `org.bouncycastle:bcprov-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` -- Bump `org.bouncycastle:bcmail-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` -- Bump Lucene from 9.7.0 to 9.8.0 ([10276](https://github.com/opensearch-project/OpenSearch/pull/10276)) -- Bump `com.netflix.nebula.ospackage-base` from 11.4.0 to 11.5.0 ([#10295](https://github.com/opensearch-project/OpenSearch/pull/10295)) -- Bump asm from 9.5 to 9.6 ([#10302](https://github.com/opensearch-project/OpenSearch/pull/10302)) -- Bump netty from 4.1.97.Final to 4.1.99.Final ([#10303](https://github.com/opensearch-project/OpenSearch/pull/10303)) -- Bump `peter-evans/create-pull-request` from 3 to 5 ([#10301](https://github.com/opensearch-project/OpenSearch/pull/10301)) -- Bump `org.apache.avro:avro` from 1.11.2 to 1.11.3 ([#10210](https://github.com/opensearch-project/OpenSearch/pull/10210)) -- Bump `org.bouncycastle:bc-fips` from 1.0.2.3 to 1.0.2.4 ([#10297](https://github.com/opensearch-project/OpenSearch/pull/10297)) -- Bump `org.apache.zookeeper:zookeeper` from 3.9.0 to 3.9.1 ([#10506](https://github.com/opensearch-project/OpenSearch/pull/10506)) -- Bump `de.thetaphi:forbiddenapis` from 3.5.1 to 3.6 ([#10508](https://github.com/opensearch-project/OpenSearch/pull/10508)) -- Bump 
`commons-io:commons-io` from 2.13.0 to 2.14.0 ([#10294](https://github.com/opensearch-project/OpenSearch/pull/10294)) ### Changed -- Add instrumentation in rest and network layer. ([#9415](https://github.com/opensearch-project/OpenSearch/pull/9415)) -- Allow parameterization of tests with OpenSearchIntegTestCase.SuiteScopeTestCase annotation ([#9916](https://github.com/opensearch-project/OpenSearch/pull/9916)) -- Mute the query profile IT with concurrent execution ([#9840](https://github.com/opensearch-project/OpenSearch/pull/9840)) -- Force merge with `only_expunge_deletes` honors max segment size ([#10036](https://github.com/opensearch-project/OpenSearch/pull/10036)) -- Add instrumentation in transport service. ([#10042](https://github.com/opensearch-project/OpenSearch/pull/10042)) -- [Tracing Framework] Add support for SpanKind. ([#10122](https://github.com/opensearch-project/OpenSearch/pull/10122)) -- Pass parent filter to inner query in nested query ([#10246](https://github.com/opensearch-project/OpenSearch/pull/10246)) -- Disable concurrent segment search when terminate_after is used ([#10200](https://github.com/opensearch-project/OpenSearch/pull/10200)) -- Add instrumentation in Inbound Handler. ([#100143](https://github.com/opensearch-project/OpenSearch/pull/10143)) -- Enable remote segment upload backpressure by default ([#10356](https://github.com/opensearch-project/OpenSearch/pull/10356)) -- [Remote Store] Add support to reload repository metadata inplace ([#9569](https://github.com/opensearch-project/OpenSearch/pull/9569)) -- [Metrics Framework] Add Metrics framework. ([#10241](https://github.com/opensearch-project/OpenSearch/pull/10241)) -- Updating the separator for RemoteStoreLockManager since underscore is allowed in base64UUID url charset ([#10379](https://github.com/opensearch-project/OpenSearch/pull/10379)) -- Add the means to extract the contextual properties from HttpChannel, TcpCChannel and TrasportChannel without excessive typecasting ([#10562](https://github.com/opensearch-project/OpenSearch/pull/10562)) ### Deprecated ### Removed -- Remove spurious SGID bit on directories ([#9447](https://github.com/opensearch-project/OpenSearch/pull/9447)) ### Fixed -- Fix ignore_missing parameter has no effect when using template snippet in rename ingest processor ([#9725](https://github.com/opensearch-project/OpenSearch/pull/9725)) -- Fix broken backward compatibility from 2.7 for IndexSorted field indices ([#10045](https://github.com/opensearch-project/OpenSearch/pull/10045)) -- Fix concurrent search NPE when track_total_hits, terminate_after and size=0 are used ([#10082](https://github.com/opensearch-project/OpenSearch/pull/10082)) -- Fix remove ingest processor handing ignore_missing parameter not correctly ([10089](https://github.com/opensearch-project/OpenSearch/pull/10089)) -- Fix class_cast_exception when passing int to _version and other metadata fields in ingest simulate API ([#10101](https://github.com/opensearch-project/OpenSearch/pull/10101)) -- Fix circular dependency in Settings initialization ([10194](https://github.com/opensearch-project/OpenSearch/pull/10194)) -- Fix registration and initialization of multiple extensions ([10256](https://github.com/opensearch-project/OpenSearch/pull/10256)) -- Fix Segment Replication ShardLockObtainFailedException bug during index corruption ([10370](https://github.com/opensearch-project/OpenSearch/pull/10370)) -- Fix some test methods in SimulatePipelineRequestParsingTests never run and fix test failure 
([#10496](https://github.com/opensearch-project/OpenSearch/pull/10496)) ### Security [Unreleased 3.0]: https://github.com/opensearch-project/OpenSearch/compare/2.x...HEAD -[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.11...2.x +[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.12...2.x diff --git a/release-notes/opensearch.release-notes-2.11.0.md b/release-notes/opensearch.release-notes-2.11.0.md new file mode 100644 index 0000000000000..d7e9182f2a656 --- /dev/null +++ b/release-notes/opensearch.release-notes-2.11.0.md @@ -0,0 +1,75 @@ +## 2023-10-12 Version 2.11.0 Release Notes + +## [2.11] + +### Added +- Add coordinator level stats for search latency ([#8386](https://github.com/opensearch-project/OpenSearch/issues/8386)) +- Add metrics for thread_pool task wait time ([#9681](https://github.com/opensearch-project/OpenSearch/pull/9681)) +- Async blob read support for S3 plugin ([#9694](https://github.com/opensearch-project/OpenSearch/pull/9694)) +- [Telemetry-Otel] Added support for OtlpGrpcSpanExporter exporter ([#9666](https://github.com/opensearch-project/OpenSearch/pull/9666)) +- Async blob read support for encrypted containers ([#10131](https://github.com/opensearch-project/OpenSearch/pull/10131)) +- Add capability to restrict async durability mode for remote indexes ([#10189](https://github.com/opensearch-project/OpenSearch/pull/10189)) +- Add Doc Status Counter for Indexing Engine ([#4562](https://github.com/opensearch-project/OpenSearch/issues/4562)) +- Add unreferenced file cleanup count to merge stats ([#10204](https://github.com/opensearch-project/OpenSearch/pull/10204)) +- [Remote Store] Add support to restrict creation & deletion if system repository and mutation of immutable settings of system repository ([#9839](https://github.com/opensearch-project/OpenSearch/pull/9839)) +- Improve compressed request handling ([#10261](https://github.com/opensearch-project/OpenSearch/pull/10261)) + +### Dependencies +- Bump `peter-evans/create-or-update-comment` from 2 to 3 ([#9575](https://github.com/opensearch-project/OpenSearch/pull/9575)) +- Bump `actions/checkout` from 2 to 4 ([#9968](https://github.com/opensearch-project/OpenSearch/pull/9968)) +- Bump OpenTelemetry from 1.26.0 to 1.30.1 ([#9950](https://github.com/opensearch-project/OpenSearch/pull/9950)) +- Bump `org.apache.commons:commons-compress` from 1.23.0 to 1.24.0 ([#9973, #9972](https://github.com/opensearch-project/OpenSearch/pull/9973, https://github.com/opensearch-project/OpenSearch/pull/9972)) +- Bump `com.google.cloud:google-cloud-core-http` from 2.21.1 to 2.23.0 ([#9971](https://github.com/opensearch-project/OpenSearch/pull/9971)) +- Bump `mockito` from 5.4.0 to 5.5.0 ([#10022](https://github.com/opensearch-project/OpenSearch/pull/10022)) +- Bump `bytebuddy` from 1.14.3 to 1.14.7 ([#10022](https://github.com/opensearch-project/OpenSearch/pull/10022)) +- Bump `com.zaxxer:SparseBitSet` from 1.2 to 1.3 ([#10098](https://github.com/opensearch-project/OpenSearch/pull/10098)) +- Bump `tibdex/github-app-token` from 1.5.0 to 2.1.0 ([#10125](https://github.com/opensearch-project/OpenSearch/pull/10125)) +- Bump `org.wiremock:wiremock-standalone` from 2.35.0 to 3.1.0 ([#9752](https://github.com/opensearch-project/OpenSearch/pull/9752)) +- Bump `com.google.http-client:google-http-client-jackson2` from 1.43.2 to 1.43.3 ([#10126](https://github.com/opensearch-project/OpenSearch/pull/10126)) +- Bump `org.xerial.snappy:snappy-java` from 1.1.10.3 to 1.1.10.5 
([#10206](https://github.com/opensearch-project/OpenSearch/pull/10206), [#10299](https://github.com/opensearch-project/OpenSearch/pull/10299)) +- Bump `com.google.api.grpc:proto-google-common-protos` from 2.10.0 to 2.25.1 ([#10208](https://github.com/opensearch-project/OpenSearch/pull/10208), [#10298](https://github.com/opensearch-project/OpenSearch/pull/10298)) +- Bump `codecov/codecov-action` from 2 to 3 ([#10209](https://github.com/opensearch-project/OpenSearch/pull/10209)) +- Bump `org.bouncycastle:bcpkix-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` +- Bump `org.bouncycastle:bcprov-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` +- Bump `org.bouncycastle:bcmail-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` +- Bump Lucene from 9.7.0 to 9.8.0 ([10276](https://github.com/opensearch-project/OpenSearch/pull/10276)) +- Bump `com.netflix.nebula.ospackage-base` from 11.4.0 to 11.5.0 ([#10295](https://github.com/opensearch-project/OpenSearch/pull/10295)) +- Bump asm from 9.5 to 9.6 ([#10302](https://github.com/opensearch-project/OpenSearch/pull/10302)) +- Bump netty from 4.1.97.Final to 4.1.99.Final ([#10303](https://github.com/opensearch-project/OpenSearch/pull/10303)) +- Bump `peter-evans/create-pull-request` from 3 to 5 ([#10301](https://github.com/opensearch-project/OpenSearch/pull/10301)) +- Bump `org.apache.avro:avro` from 1.11.2 to 1.11.3 ([#10210](https://github.com/opensearch-project/OpenSearch/pull/10210)) +- Bump `org.bouncycastle:bc-fips` from 1.0.2.3 to 1.0.2.4 ([#10297](https://github.com/opensearch-project/OpenSearch/pull/10297)) +- Bump `org.apache.zookeeper:zookeeper` from 3.9.0 to 3.9.1 ([#10506](https://github.com/opensearch-project/OpenSearch/pull/10506)) +- Bump `de.thetaphi:forbiddenapis` from 3.5.1 to 3.6 ([#10508](https://github.com/opensearch-project/OpenSearch/pull/10508)) +- Bump `commons-io:commons-io` from 2.13.0 to 2.14.0 ([#10294](https://github.com/opensearch-project/OpenSearch/pull/10294)) +- Bump `netty` from 4.1.99.Final to 4.1.100.Final ([#10564](https://github.com/opensearch-project/OpenSearch/pull/10564)) + +### Changed +- Add instrumentation in rest and network layer. ([#9415](https://github.com/opensearch-project/OpenSearch/pull/9415)) +- Allow parameterization of tests with OpenSearchIntegTestCase.SuiteScopeTestCase annotation ([#9916](https://github.com/opensearch-project/OpenSearch/pull/9916)) +- Mute the query profile IT with concurrent execution ([#9840](https://github.com/opensearch-project/OpenSearch/pull/9840)) +- Force merge with `only_expunge_deletes` honors max segment size ([#10036](https://github.com/opensearch-project/OpenSearch/pull/10036)) +- Add instrumentation in transport service. ([#10042](https://github.com/opensearch-project/OpenSearch/pull/10042)) +- [Tracing Framework] Add support for SpanKind. ([#10122](https://github.com/opensearch-project/OpenSearch/pull/10122)) +- Pass parent filter to inner query in nested query ([#10246](https://github.com/opensearch-project/OpenSearch/pull/10246)) +- Disable concurrent segment search when terminate_after is used ([#10200](https://github.com/opensearch-project/OpenSearch/pull/10200)) +- Add instrumentation in Inbound Handler. 
+- Enable remote segment upload backpressure by default ([#10356](https://github.com/opensearch-project/OpenSearch/pull/10356))
+- [Remote Store] Add support to reload repository metadata in place ([#9569](https://github.com/opensearch-project/OpenSearch/pull/9569))
+- [Metrics Framework] Add Metrics framework. ([#10241](https://github.com/opensearch-project/OpenSearch/pull/10241))
+- Updating the separator for RemoteStoreLockManager since underscore is allowed in base64UUID url charset ([#10379](https://github.com/opensearch-project/OpenSearch/pull/10379))
+- Add the means to extract the contextual properties from HttpChannel, TcpChannel and TransportChannel without excessive typecasting ([#10562](https://github.com/opensearch-project/OpenSearch/pull/10562))
+
+### Removed
+- Remove spurious SGID bit on directories ([#9447](https://github.com/opensearch-project/OpenSearch/pull/9447))
+
+### Fixed
+- Fix ignore_missing parameter has no effect when using template snippet in rename ingest processor ([#9725](https://github.com/opensearch-project/OpenSearch/pull/9725))
+- Fix broken backward compatibility from 2.7 for IndexSorted field indices ([#10045](https://github.com/opensearch-project/OpenSearch/pull/10045))
+- Fix concurrent search NPE when track_total_hits, terminate_after and size=0 are used ([#10082](https://github.com/opensearch-project/OpenSearch/pull/10082))
+- Fix remove ingest processor not handling ignore_missing parameter correctly ([#10089](https://github.com/opensearch-project/OpenSearch/pull/10089))
+- Fix class_cast_exception when passing int to _version and other metadata fields in ingest simulate API ([#10101](https://github.com/opensearch-project/OpenSearch/pull/10101))
+- Fix circular dependency in Settings initialization ([#10194](https://github.com/opensearch-project/OpenSearch/pull/10194))
+- Fix registration and initialization of multiple extensions ([#10256](https://github.com/opensearch-project/OpenSearch/pull/10256))
+- Fix Segment Replication ShardLockObtainFailedException bug during index corruption ([#10370](https://github.com/opensearch-project/OpenSearch/pull/10370))
+- Fix some test methods in SimulatePipelineRequestParsingTests never run and fix test failure ([#10496](https://github.com/opensearch-project/OpenSearch/pull/10496))

From 1447d75e5e9ea042dd4bb645d71f25aea1fb42a8 Mon Sep 17 00:00:00 2001
From: Michael Froh
Date: Thu, 12 Oct 2023 15:30:02 -0700
Subject: [PATCH 24/33] Point deprecated settings to the new location. (#10522)

I was looking through some cluster settings, stumbled across the search back-pressure settings, saw they were deprecated, and the Javadoc alluded to some mysterious new settings. I'm adding links to the class with the new settings to help future developers.
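For context, the fallback behavior the linked Javadoc describes works like this: the deprecated key is still honored, and it is consulted whenever the new key is absent. Below is a minimal self-contained sketch of that lookup order in plain Java; the class name and the exact setting keys are illustrative, not the real OpenSearch `Setting` API:

```java
import java.util.Map;

/** Sketch: resolve a value from a new settings key, falling back to a deprecated one. */
public final class FallbackSettingSketch {

    static double readWithFallback(Map<String, String> settings, String newKey, String deprecatedKey, double defaultValue) {
        // Prefer the new key; fall back to the deprecated key; finally use the default.
        String raw = settings.getOrDefault(newKey, settings.get(deprecatedKey));
        return raw == null ? defaultValue : Double.parseDouble(raw);
    }

    public static void main(String[] args) {
        // A cluster still configured with only the deprecated key keeps working.
        Map<String, String> settings = Map.of("search_backpressure.cancellation_ratio", "0.2");
        double ratio = readWithFallback(
            settings,
            "search_backpressure.search_shard_task.cancellation_ratio", // new per-task-type key (assumed)
            "search_backpressure.cancellation_ratio",                   // deprecated key, kept for BWC
            0.1
        );
        System.out.println(ratio); // prints 0.2
    }
}
```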
Signed-off-by: Michael Froh --- .../backpressure/settings/SearchBackpressureSettings.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/opensearch/search/backpressure/settings/SearchBackpressureSettings.java b/server/src/main/java/org/opensearch/search/backpressure/settings/SearchBackpressureSettings.java index 4c28d96d8289e..79494eb0d3c24 100644 --- a/server/src/main/java/org/opensearch/search/backpressure/settings/SearchBackpressureSettings.java +++ b/server/src/main/java/org/opensearch/search/backpressure/settings/SearchBackpressureSettings.java @@ -55,7 +55,7 @@ private static class Defaults { * Defines the percentage of tasks to cancel relative to the number of successful task completions. * In other words, it is the number of tokens added to the bucket on each successful task completion. *

                              - * The setting below is deprecated. + * The setting below is deprecated. The new setting is in {@link SearchShardTaskSettings}. * To keep backwards compatibility, the old usage is remained, and it's also used as the fallback for the new usage. */ public static final Setting SETTING_CANCELLATION_RATIO = Setting.doubleSetting( @@ -72,7 +72,7 @@ private static class Defaults { * Defines the number of tasks to cancel per unit time (in millis). * In other words, it is the number of tokens added to the bucket each millisecond. *

                              - * The setting below is deprecated. + * The setting below is deprecated. The new setting is in {@link SearchShardTaskSettings}. * To keep backwards compatibility, the old usage is remained, and it's also used as the fallback for the new usage. */ public static final Setting SETTING_CANCELLATION_RATE = Setting.doubleSetting( @@ -87,7 +87,7 @@ private static class Defaults { /** * Defines the maximum number of tasks that can be cancelled before being rate-limited. *

                              - * The setting below is deprecated. + * The setting below is deprecated. The new setting is in {@link SearchShardTaskSettings}. * To keep backwards compatibility, the old usage is remained, and it's also used as the fallback for the new usage. */ public static final Setting SETTING_CANCELLATION_BURST = Setting.doubleSetting( From 25f2b769f85c24258e2c49cc0d6b09d6b2500b99 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Fri, 13 Oct 2023 17:03:05 -0400 Subject: [PATCH 25/33] Bump OpenTelemetry from 1.30.1 to 1.31.0 (#10617) Signed-off-by: Andriy Redko --- CHANGELOG.md | 1 + buildSrc/version.properties | 3 ++- plugins/telemetry-otel/build.gradle | 2 +- .../telemetry-otel/licenses/opentelemetry-api-1.30.1.jar.sha1 | 1 - .../telemetry-otel/licenses/opentelemetry-api-1.31.0.jar.sha1 | 1 + .../licenses/opentelemetry-context-1.30.1.jar.sha1 | 1 - .../licenses/opentelemetry-context-1.31.0.jar.sha1 | 1 + .../licenses/opentelemetry-exporter-common-1.30.1.jar.sha1 | 1 - .../licenses/opentelemetry-exporter-common-1.31.0.jar.sha1 | 1 + .../licenses/opentelemetry-exporter-logging-1.30.1.jar.sha1 | 1 - .../licenses/opentelemetry-exporter-logging-1.31.0.jar.sha1 | 1 + .../licenses/opentelemetry-exporter-otlp-1.30.1.jar.sha1 | 1 - .../licenses/opentelemetry-exporter-otlp-1.31.0.jar.sha1 | 1 + .../opentelemetry-exporter-otlp-common-1.30.1.jar.sha1 | 1 - .../opentelemetry-exporter-otlp-common-1.31.0.jar.sha1 | 1 + .../opentelemetry-exporter-sender-okhttp-1.30.1.jar.sha1 | 1 - .../opentelemetry-exporter-sender-okhttp-1.31.0.jar.sha1 | 1 + .../opentelemetry-extension-incubator-1.30.1-alpha.jar.sha1 | 1 - .../opentelemetry-extension-incubator-1.31.0-alpha.jar.sha1 | 1 + .../telemetry-otel/licenses/opentelemetry-sdk-1.30.1.jar.sha1 | 1 - .../telemetry-otel/licenses/opentelemetry-sdk-1.31.0.jar.sha1 | 1 + .../licenses/opentelemetry-sdk-common-1.30.1.jar.sha1 | 1 - .../licenses/opentelemetry-sdk-common-1.31.0.jar.sha1 | 1 + .../licenses/opentelemetry-sdk-logs-1.30.1.jar.sha1 | 1 - .../licenses/opentelemetry-sdk-logs-1.31.0.jar.sha1 | 1 + .../licenses/opentelemetry-sdk-metrics-1.30.1.jar.sha1 | 1 - .../licenses/opentelemetry-sdk-metrics-1.31.0.jar.sha1 | 1 + .../licenses/opentelemetry-sdk-trace-1.30.1.jar.sha1 | 1 - .../licenses/opentelemetry-sdk-trace-1.31.0.jar.sha1 | 1 + .../licenses/opentelemetry-semconv-1.21.0-alpha.jar.sha1 | 1 + .../licenses/opentelemetry-semconv-1.30.1-alpha.jar.sha1 | 1 - .../org/opensearch/telemetry/tracing/OTelResourceProvider.java | 2 +- 32 files changed, 19 insertions(+), 17 deletions(-) delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-api-1.30.1.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-api-1.31.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-context-1.30.1.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-context-1.31.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.30.1.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.31.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.30.1.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.31.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.30.1.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.31.0.jar.sha1 delete mode 100644 
plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.30.1.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.31.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.30.1.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.31.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.30.1-alpha.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.31.0-alpha.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-1.30.1.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-1.31.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.30.1.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.31.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.30.1.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.31.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.30.1.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.31.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.30.1.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.31.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-semconv-1.21.0-alpha.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-semconv-1.30.1-alpha.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index a9e5bb3982708..51f477fc38da8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -45,6 +45,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump JNA version from 5.5 to 5.13 ([#9963](https://github.com/opensearch-project/OpenSearch/pull/9963)) - Bumps jetty version to 9.4.52.v20230823 to fix GMS-2023-1857 ([#9822](https://github.com/opensearch-project/OpenSearch/pull/9822)) - Bump `org.eclipse.jgit` from 6.5.0 to 6.7.0 ([#10147](https://github.com/opensearch-project/OpenSearch/pull/10147)) +- Bump OpenTelemetry from 1.30.1 to 1.31.0 ([#10617](https://github.com/opensearch-project/OpenSearch/pull/10617)) ### Changed - [CCR] Add getHistoryOperationsFromTranslog method to fetch the history snapshot from translogs ([#3948](https://github.com/opensearch-project/OpenSearch/pull/3948)) diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 64f9f9a8828ad..a5171aa582a86 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -68,4 +68,5 @@ jzlib = 1.1.3 resteasy = 6.2.4.Final # opentelemetry dependencies -opentelemetry = 1.30.1 +opentelemetry = 1.31.0 +opentelemetrysemconv = 1.21.0-alpha diff --git a/plugins/telemetry-otel/build.gradle b/plugins/telemetry-otel/build.gradle index 04fff20947b4f..f5c367cb7643b 100644 --- a/plugins/telemetry-otel/build.gradle +++ b/plugins/telemetry-otel/build.gradle @@ -28,7 +28,7 @@ dependencies { api "io.opentelemetry:opentelemetry-sdk-trace:${versions.opentelemetry}" api "io.opentelemetry:opentelemetry-sdk-metrics:${versions.opentelemetry}" api "io.opentelemetry:opentelemetry-exporter-logging:${versions.opentelemetry}" - api "io.opentelemetry:opentelemetry-semconv:${versions.opentelemetry}-alpha" + api 
"io.opentelemetry.semconv:opentelemetry-semconv:${versions.opentelemetrysemconv}" api "io.opentelemetry:opentelemetry-sdk-logs:${versions.opentelemetry}" api "io.opentelemetry:opentelemetry-exporter-otlp:${versions.opentelemetry}" api "io.opentelemetry:opentelemetry-exporter-common:${versions.opentelemetry}" diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-1.30.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-1.30.1.jar.sha1 deleted file mode 100644 index b0ce00e191830..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-api-1.30.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a32dfbd7f01de6711fd0e970f8d4b4c0405056d6 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..eae141a8d1a23 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-api-1.31.0.jar.sha1 @@ -0,0 +1 @@ +bb24a44d73484c681c236aed84fe6c28d17f30e2 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-context-1.30.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-context-1.30.1.jar.sha1 deleted file mode 100644 index 84cb60a2f7acb..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-context-1.30.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -58f665ff01ce6b964cdf0b8cb5cd1c196dfe94ce \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-context-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-context-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..6e42973adc581 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-context-1.31.0.jar.sha1 @@ -0,0 +1 @@ +b8004737f7a970124e36ac71fde8eb88423e8cee \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.30.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.30.1.jar.sha1 deleted file mode 100644 index eccb15f7b7c8e..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.30.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f299d336dba1039478497f37b273dfa764c6faef \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..b119468e7f88b --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.31.0.jar.sha1 @@ -0,0 +1 @@ +b7b4baf5f9af72d5eb8a231dfb114ae31c57150d \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.30.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.30.1.jar.sha1 deleted file mode 100644 index 40537a399ab14..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.30.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -58f1a09e89955e6145babf8bcdf80c95174eb817 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..8f653922d6418 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.31.0.jar.sha1 @@ -0,0 +1 @@ +260e5363dad83a0ae65c16ad6a3dd2914e0db201 \ No newline at end of file diff --git 
a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.30.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.30.1.jar.sha1 deleted file mode 100644 index e88b7514ee54d..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.30.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -15692246539571c41180aff2b55abe527b939a7b \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..103da4720de96 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.31.0.jar.sha1 @@ -0,0 +1 @@ +b6454464425dfd81519070caeca3824558a2f1ae \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.30.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.30.1.jar.sha1 deleted file mode 100644 index 86937743208c6..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.30.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -947cf43a6411c4a323e14594431040a476ad43e8 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..3db07532ceea9 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.31.0.jar.sha1 @@ -0,0 +1 @@ +d8c22b6851bbc3dbf5d2387b9bde158ed5416ba4 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.30.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.30.1.jar.sha1 deleted file mode 100644 index 068926277253c..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.30.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9f3a14515500e4df260ce7b10a668237a95ac791 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..10d9b7cdfe3e3 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.31.0.jar.sha1 @@ -0,0 +1 @@ +dd209381d58cfe81a989e29c9ca26d97c8dabd7a \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.30.1-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.30.1-alpha.jar.sha1 deleted file mode 100644 index bde43937e82e4..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.30.1-alpha.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bfcea9bd71f97dd4e8a4f92c15ba5659fb07ff05 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.31.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.31.0-alpha.jar.sha1 new file mode 100644 index 0000000000000..162890965a6eb --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.31.0-alpha.jar.sha1 @@ -0,0 +1 @@ +6c9f5c063309d92b6dd28bff0667f54b63afd36f \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.30.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.30.1.jar.sha1 deleted 
file mode 100644 index d425ed61cc4cd..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.30.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4d15a9ea26e8e6ea93287a9f4ee02d91e5a74392 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..d6ce31a31cc6f --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.31.0.jar.sha1 @@ -0,0 +1 @@ +2b2093be08a09ac536292bf6cecf8129cc7fb191 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.30.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.30.1.jar.sha1 deleted file mode 100644 index 6b32d98b0f7c7..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.30.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8e437ba87004bb63069d04fb06beae65b98dd13a \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..8a6a9705d836d --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.31.0.jar.sha1 @@ -0,0 +1 @@ +f492528288236e097e12fc1c45963dd82c70d33c \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.30.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.30.1.jar.sha1 deleted file mode 100644 index 13ef6de11e82d..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.30.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5985d0950746ad12b49cc42c063f26ddfbcaaacb \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..37d79f5c573f7 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.31.0.jar.sha1 @@ -0,0 +1 @@ +a63a203d3dc6f8875f8c26b9e3b522dc9a3f6280 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.30.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.30.1.jar.sha1 deleted file mode 100644 index fc5aad9c9011e..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.30.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b12825541c5dae52a0fb35045c1b36df3ca8f632 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..80179e4808f50 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.31.0.jar.sha1 @@ -0,0 +1 @@ +47cc23762fae728d68e4fda1dfb71986ae0b8b3e \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.30.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.30.1.jar.sha1 deleted file mode 100644 index ac522b765da05..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.30.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4c5531fbc44178a7bcfeb7021ae80e70a7c43458 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.31.0.jar.sha1 new file mode 100644 index 
0000000000000..fd917a58ba77c --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.31.0.jar.sha1 @@ -0,0 +1 @@ +a3941197cfb8ae9eb9e482073480c0c3918b746c \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.21.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.21.0-alpha.jar.sha1 new file mode 100644 index 0000000000000..77b12c99464f6 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.21.0-alpha.jar.sha1 @@ -0,0 +1 @@ +207660e74d1e155272e9559fd4d27854b92fc6ac \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.30.1-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.30.1-alpha.jar.sha1 deleted file mode 100644 index 089a2484dd1d5..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.30.1-alpha.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8e8f7a97a4896a81846553275b9d61885be7ef50 \ No newline at end of file diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelResourceProvider.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelResourceProvider.java index a6a1f12aab8a9..14a19f122c17b 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelResourceProvider.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelResourceProvider.java @@ -30,7 +30,7 @@ import io.opentelemetry.sdk.trace.export.BatchSpanProcessor; import io.opentelemetry.sdk.trace.export.SpanExporter; import io.opentelemetry.sdk.trace.samplers.Sampler; -import io.opentelemetry.semconv.resource.attributes.ResourceAttributes; +import io.opentelemetry.semconv.ResourceAttributes; import static org.opensearch.telemetry.OTelTelemetrySettings.TRACER_EXPORTER_BATCH_SIZE_SETTING; import static org.opensearch.telemetry.OTelTelemetrySettings.TRACER_EXPORTER_DELAY_SETTING; From daf1350888f878868748172f576a0cdb3dc64b33 Mon Sep 17 00:00:00 2001 From: David Z <38449481+dzane17@users.noreply.github.com> Date: Fri, 13 Oct 2023 14:12:46 -0700 Subject: [PATCH 26/33] Per request phase latency (#10351) Signed-off-by: David Zane --- CHANGELOG.md | 1 + .../search/AbstractSearchAsyncAction.java | 2 + .../action/search/MultiSearchRequest.java | 5 + .../action/search/SearchRequest.java | 31 +++- .../action/search/SearchResponse.java | 132 +++++++++++++++++- .../action/search/SearchResponseMerger.java | 1 + .../action/search/TransportSearchAction.java | 129 +++++++++++++---- .../common/settings/ClusterSettings.java | 1 + .../rest/action/search/RestSearchAction.java | 6 + .../AbstractSearchAsyncActionTests.java | 15 +- .../action/search/SearchRequestTests.java | 1 + .../action/search/SearchResponseTests.java | 47 ++++++- .../search/SearchTimeProviderTests.java | 54 +++++++ .../search/RandomSearchRequestGenerator.java | 3 + 14 files changed, 394 insertions(+), 34 deletions(-) create mode 100644 server/src/test/java/org/opensearch/action/search/SearchTimeProviderTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 51f477fc38da8..a53d31c7861ff 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -85,6 +85,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ## [Unreleased 2.x] ### Added +- Per request phase latency ([#10351](https://github.com/opensearch-project/OpenSearch/issues/10351)) ### Dependencies diff --git a/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java 
b/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java index 1c0a1280ad550..14f57218ae1dc 100644 --- a/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java @@ -215,6 +215,7 @@ public final void start() { 0, 0, buildTookInMillis(), + timeProvider.getPhaseTook(), ShardSearchFailure.EMPTY_ARRAY, clusters, null @@ -662,6 +663,7 @@ protected final SearchResponse buildSearchResponse( successfulOps.get(), skippedOps.get(), buildTookInMillis(), + timeProvider.getPhaseTook(), failures, clusters, searchContextId diff --git a/server/src/main/java/org/opensearch/action/search/MultiSearchRequest.java b/server/src/main/java/org/opensearch/action/search/MultiSearchRequest.java index da8f8f144eaf2..00e0345062d1c 100644 --- a/server/src/main/java/org/opensearch/action/search/MultiSearchRequest.java +++ b/server/src/main/java/org/opensearch/action/search/MultiSearchRequest.java @@ -277,6 +277,8 @@ public static void readMultiLineFormat( } else if ("cancel_after_time_interval".equals(entry.getKey()) || "cancelAfterTimeInterval".equals(entry.getKey())) { searchRequest.setCancelAfterTimeInterval(nodeTimeValue(value, null)); + } else if ("phase_took".equals(entry.getKey())) { + searchRequest.setPhaseTook(nodeBooleanValue(value)); } else { throw new IllegalArgumentException("key [" + entry.getKey() + "] is not supported in the metadata section"); } @@ -374,6 +376,9 @@ public static void writeSearchRequestParams(SearchRequest request, XContentBuild if (request.getCancelAfterTimeInterval() != null) { xContentBuilder.field("cancel_after_time_interval", request.getCancelAfterTimeInterval().getStringRep()); } + if (request.isPhaseTook() != null) { + xContentBuilder.field("phase_took", request.isPhaseTook()); + } xContentBuilder.endObject(); } diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequest.java b/server/src/main/java/org/opensearch/action/search/SearchRequest.java index 21cf0ed97b9da..9e50213eab5f9 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequest.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequest.java @@ -117,6 +117,8 @@ public class SearchRequest extends ActionRequest implements IndicesRequest.Repla private String pipeline; + private Boolean phaseTook = null; + public SearchRequest() { this.localClusterAlias = null; this.absoluteStartMillis = DEFAULT_ABSOLUTE_START_MILLIS; @@ -209,6 +211,7 @@ private SearchRequest( this.absoluteStartMillis = absoluteStartMillis; this.finalReduce = finalReduce; this.cancelAfterTimeInterval = searchRequest.cancelAfterTimeInterval; + this.phaseTook = searchRequest.phaseTook; } /** @@ -253,6 +256,9 @@ public SearchRequest(StreamInput in) throws IOException { if (in.getVersion().onOrAfter(Version.V_2_7_0)) { pipeline = in.readOptionalString(); } + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + phaseTook = in.readOptionalBoolean(); + } } @Override @@ -284,6 +290,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_2_7_0)) { out.writeOptionalString(pipeline); } + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + out.writeOptionalBoolean(phaseTook); + } } @Override @@ -615,6 +624,20 @@ public void setPreFilterShardSize(int preFilterShardSize) { this.preFilterShardSize = preFilterShardSize; } + /** + * Returns value of user-provided phase_took query parameter for this search request. 
+ */ + public Boolean isPhaseTook() { + return phaseTook; + } + + /** + * Sets value of phase_took query param if provided by user. Defaults to null. + */ + public void setPhaseTook(Boolean phaseTook) { + this.phaseTook = phaseTook; + } + /** * Returns a threshold that enforces a pre-filter roundtrip to pre-filter search shards based on query rewriting if the number of shards * the search request expands to exceeds the threshold, or null if the threshold is unspecified. @@ -719,7 +742,8 @@ public boolean equals(Object o) { && absoluteStartMillis == that.absoluteStartMillis && ccsMinimizeRoundtrips == that.ccsMinimizeRoundtrips && Objects.equals(cancelAfterTimeInterval, that.cancelAfterTimeInterval) - && Objects.equals(pipeline, that.pipeline); + && Objects.equals(pipeline, that.pipeline) + && Objects.equals(phaseTook, that.phaseTook); } @Override @@ -740,7 +764,8 @@ public int hashCode() { localClusterAlias, absoluteStartMillis, ccsMinimizeRoundtrips, - cancelAfterTimeInterval + cancelAfterTimeInterval, + phaseTook ); } @@ -783,6 +808,8 @@ public String toString() { + cancelAfterTimeInterval + ", pipeline=" + pipeline + + ", phaseTook=" + + phaseTook + "}"; } } diff --git a/server/src/main/java/org/opensearch/action/search/SearchResponse.java b/server/src/main/java/org/opensearch/action/search/SearchResponse.java index a546311a1f668..91f0dc0737637 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchResponse.java +++ b/server/src/main/java/org/opensearch/action/search/SearchResponse.java @@ -33,6 +33,7 @@ package org.opensearch.action.search; import org.apache.lucene.search.TotalHits; +import org.opensearch.Version; import org.opensearch.common.Nullable; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.StatusToXContentObject; @@ -63,7 +64,9 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.function.Supplier; @@ -94,6 +97,7 @@ public class SearchResponse extends ActionResponse implements StatusToXContentOb private final ShardSearchFailure[] shardFailures; private final Clusters clusters; private final long tookInMillis; + private final PhaseTook phaseTook; public SearchResponse(StreamInput in) throws IOException { super(in); @@ -112,6 +116,11 @@ public SearchResponse(StreamInput in) throws IOException { clusters = new Clusters(in); scrollId = in.readOptionalString(); tookInMillis = in.readVLong(); + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + phaseTook = in.readOptionalWriteable(PhaseTook::new); + } else { + phaseTook = null; + } skippedShards = in.readVInt(); pointInTimeId = in.readOptionalString(); } @@ -126,7 +135,32 @@ public SearchResponse( ShardSearchFailure[] shardFailures, Clusters clusters ) { - this(internalResponse, scrollId, totalShards, successfulShards, skippedShards, tookInMillis, shardFailures, clusters, null); + this(internalResponse, scrollId, totalShards, successfulShards, skippedShards, tookInMillis, null, shardFailures, clusters, null); + } + + public SearchResponse( + SearchResponseSections internalResponse, + String scrollId, + int totalShards, + int successfulShards, + int skippedShards, + long tookInMillis, + ShardSearchFailure[] shardFailures, + Clusters clusters, + String pointInTimeId + ) { + this( + internalResponse, + scrollId, + totalShards, + successfulShards, + skippedShards, + tookInMillis, + null, + shardFailures, + clusters, + pointInTimeId + ); } 
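The `Version.V_3_0_0` checks in the stream constructor and `writeTo` above follow the standard pattern for adding an optional field to the transport protocol without breaking mixed-version clusters: the field is written and read only when the peer is new enough. A rough self-contained sketch of the presence-flag encoding, using plain `java.io` in place of `StreamInput`/`StreamOutput` (the version constant is illustrative):

```java
import java.io.*;

/** Sketch: version-gated, optional Boolean on the wire. */
final class OptionalBooleanWireSketch {
    static final int V_3_0_0 = 3_000_000; // illustrative stand-in for Version.V_3_0_0

    static void writeOptionalBoolean(DataOutputStream out, int wireVersion, Boolean value) throws IOException {
        if (wireVersion >= V_3_0_0) {        // analogous to out.getVersion().onOrAfter(Version.V_3_0_0)
            out.writeBoolean(value != null); // presence flag
            if (value != null) {
                out.writeBoolean(value);
            }
        } // older peers never see the field at all
    }

    static Boolean readOptionalBoolean(DataInputStream in, int wireVersion) throws IOException {
        if (wireVersion < V_3_0_0) {
            return null; // field does not exist on the old wire format
        }
        return in.readBoolean() ? in.readBoolean() : null;
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        writeOptionalBoolean(new DataOutputStream(bytes), V_3_0_0, Boolean.TRUE);
        Boolean roundTripped = readOptionalBoolean(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())), V_3_0_0);
        System.out.println(roundTripped); // true
    }
}
```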
public SearchResponse( @@ -136,6 +170,7 @@ public SearchResponse( int successfulShards, int skippedShards, long tookInMillis, + PhaseTook phaseTook, ShardSearchFailure[] shardFailures, Clusters clusters, String pointInTimeId @@ -148,6 +183,7 @@ public SearchResponse( this.successfulShards = successfulShards; this.skippedShards = skippedShards; this.tookInMillis = tookInMillis; + this.phaseTook = phaseTook; this.shardFailures = shardFailures; assert skippedShards <= totalShards : "skipped: " + skippedShards + " total: " + totalShards; assert scrollId == null || pointInTimeId == null : "SearchResponse can't have both scrollId [" @@ -210,6 +246,13 @@ public TimeValue getTook() { return new TimeValue(tookInMillis); } + /** + * How long the request took in each search phase. + */ + public PhaseTook getPhaseTook() { + return phaseTook; + } + /** * The total number of shards the search was executed on. */ @@ -298,6 +341,9 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) t builder.field(POINT_IN_TIME_ID.getPreferredName(), pointInTimeId); } builder.field(TOOK.getPreferredName(), tookInMillis); + if (phaseTook != null) { + phaseTook.toXContent(builder, params); + } builder.field(TIMED_OUT.getPreferredName(), isTimedOut()); if (isTerminatedEarly() != null) { builder.field(TERMINATED_EARLY.getPreferredName(), isTerminatedEarly()); @@ -337,6 +383,7 @@ public static SearchResponse innerFromXContent(XContentParser parser) throws IOE Boolean terminatedEarly = null; int numReducePhases = 1; long tookInMillis = -1; + PhaseTook phaseTook = null; int successfulShards = -1; int totalShards = -1; int skippedShards = 0; // 0 for BWC @@ -401,6 +448,24 @@ public static SearchResponse innerFromXContent(XContentParser parser) throws IOE parser.skipChildren(); } } + } else if (PhaseTook.PHASE_TOOK.match(currentFieldName, parser.getDeprecationHandler())) { + Map phaseTookMap = new HashMap<>(); + + while ((token = parser.nextToken()) != Token.END_OBJECT) { + if (token == Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token.isValue()) { + try { + SearchPhaseName.valueOf(currentFieldName.toUpperCase(Locale.ROOT)); + phaseTookMap.put(currentFieldName, parser.longValue()); + } catch (final IllegalArgumentException ex) { + parser.skipChildren(); + } + } else { + parser.skipChildren(); + } + } + phaseTook = new PhaseTook(phaseTookMap); } else if (Clusters._CLUSTERS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { int successful = -1; int total = -1; @@ -472,6 +537,7 @@ public static SearchResponse innerFromXContent(XContentParser parser) throws IOE successfulShards, skippedShards, tookInMillis, + phaseTook, failures.toArray(ShardSearchFailure.EMPTY_ARRAY), clusters, searchContextId @@ -491,6 +557,9 @@ public void writeTo(StreamOutput out) throws IOException { clusters.writeTo(out); out.writeOptionalString(scrollId); out.writeVLong(tookInMillis); + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + out.writeOptionalWriteable(phaseTook); + } out.writeVInt(skippedShards); out.writeOptionalString(pointInTimeId); } @@ -604,6 +673,67 @@ public String toString() { } } + /** + * Holds info about the clusters that the search was executed on: how many in total, how many of them were successful + * and how many of them were skipped. 
+ * + * @opensearch.internal + */ + public static class PhaseTook implements ToXContentFragment, Writeable { + static final ParseField PHASE_TOOK = new ParseField("phase_took"); + private final Map phaseTookMap; + + public PhaseTook(Map phaseTookMap) { + this.phaseTookMap = phaseTookMap; + } + + private PhaseTook(StreamInput in) throws IOException { + this(in.readMap(StreamInput::readString, StreamInput::readLong)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeMap(phaseTookMap, StreamOutput::writeString, StreamOutput::writeLong); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(PHASE_TOOK.getPreferredName()); + + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + if (phaseTookMap.containsKey(searchPhaseName.getName())) { + builder.field(searchPhaseName.getName(), phaseTookMap.get(searchPhaseName.getName())); + } else { + builder.field(searchPhaseName.getName(), 0); + } + } + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + PhaseTook phaseTook = (PhaseTook) o; + + if (phaseTook.phaseTookMap.equals(phaseTookMap)) { + return true; + } else { + return false; + } + } + + @Override + public int hashCode() { + return Objects.hash(phaseTookMap); + } + } + static SearchResponse empty(Supplier tookInMillisSupplier, Clusters clusters) { SearchHits searchHits = new SearchHits(new SearchHit[0], new TotalHits(0L, TotalHits.Relation.EQUAL_TO), Float.NaN); InternalSearchResponse internalSearchResponse = new InternalSearchResponse( diff --git a/server/src/main/java/org/opensearch/action/search/SearchResponseMerger.java b/server/src/main/java/org/opensearch/action/search/SearchResponseMerger.java index f90e98106f93f..054bd578cc56c 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchResponseMerger.java +++ b/server/src/main/java/org/opensearch/action/search/SearchResponseMerger.java @@ -236,6 +236,7 @@ SearchResponse getMergedResponse(SearchResponse.Clusters clusters) { successfulShards, skippedShards, tookInMillis, + searchTimeProvider.getPhaseTook(), shardFailures, clusters, null diff --git a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java index cff1005beff27..284f71bd9da62 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java @@ -98,6 +98,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.EnumMap; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -154,6 +155,14 @@ public class TransportSearchAction extends HandledTransportAction SEARCH_PHASE_TOOK_ENABLED = Setting.boolSetting( + SEARCH_PHASE_TOOK_ENABLED_KEY, + false, + Property.Dynamic, + Property.NodeScope + ); + private final NodeClient client; private final ThreadPool threadPool; private final ClusterService clusterService; @@ -252,6 +261,8 @@ private Map resolveIndexBoosts(SearchRequest searchRequest, Clust } /** + * Listener to track request-level tookTime and phase tookTimes from the coordinator. + * * Search operations need two clocks. One clock is to fulfill real clock needs (e.g., resolving * "now" to an index name). 
Another clock is needed for measuring how long a search operation * took. These two uses are at odds with each other. There are many issues with using a real @@ -261,11 +272,12 @@ private Map resolveIndexBoosts(SearchRequest searchRequest, Clust * * @opensearch.internal */ - static final class SearchTimeProvider { + static final class SearchTimeProvider implements SearchRequestOperationsListener { private final long absoluteStartMillis; private final long relativeStartNanos; private final LongSupplier relativeCurrentNanosProvider; + private boolean phaseTook = false; /** * Instantiates a new search time provider. The absolute start time is the real clock time @@ -291,6 +303,47 @@ long getAbsoluteStartMillis() { long buildTookInMillis() { return TimeUnit.NANOSECONDS.toMillis(relativeCurrentNanosProvider.getAsLong() - relativeStartNanos); } + + public void setPhaseTook(boolean phaseTook) { + this.phaseTook = phaseTook; + } + + public boolean isPhaseTook() { + return phaseTook; + } + + SearchResponse.PhaseTook getPhaseTook() { + if (phaseTook) { + Map phaseTookMap = new HashMap<>(); + // Convert Map to Map for SearchResponse() + for (SearchPhaseName searchPhaseName : phaseStatsMap.keySet()) { + phaseTookMap.put(searchPhaseName.getName(), phaseStatsMap.get(searchPhaseName)); + } + return new SearchResponse.PhaseTook(phaseTookMap); + } else { + return null; + } + } + + Map phaseStatsMap = new EnumMap<>(SearchPhaseName.class); + + @Override + public void onPhaseStart(SearchPhaseContext context) {} + + @Override + public void onPhaseEnd(SearchPhaseContext context) { + phaseStatsMap.put( + context.getCurrentPhase().getSearchPhaseName(), + TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - context.getCurrentPhase().getStartTimeInNanos()) + ); + } + + @Override + public void onPhaseFailure(SearchPhaseContext context) {} + + public Long getPhaseTookTime(SearchPhaseName searchPhaseName) { + return phaseStatsMap.get(searchPhaseName); + } } @Override @@ -332,13 +385,6 @@ public void executeRequest( SinglePhaseSearchAction phaseSearchAction, ActionListener listener ) { - final List searchListenersList = createSearchListenerList(); - final SearchRequestOperationsListener searchRequestOperationsListener; - if (!CollectionUtils.isEmpty(searchListenersList)) { - searchRequestOperationsListener = new SearchRequestOperationsListener.CompositeListener(searchListenersList, logger); - } else { - searchRequestOperationsListener = null; - } executeRequest(task, searchRequest, new SearchAsyncActionProvider() { @Override public AbstractSearchAsyncAction asyncSearchAction( @@ -355,7 +401,8 @@ public AbstractSearchAsyncAction asyncSearchAction( ActionListener listener, boolean preFilter, ThreadPool threadPool, - SearchResponse.Clusters clusters + SearchResponse.Clusters clusters, + SearchRequestOperationsListener searchRequestOperationsListener ) { return new AbstractSearchAsyncAction( actionName, @@ -419,6 +466,16 @@ private void executeRequest( relativeStartNanos, System::nanoTime ); + + final List searchListenersList = createSearchListenerList(originalSearchRequest, timeProvider); + + final SearchRequestOperationsListener searchRequestOperationsListener; + if (!CollectionUtils.isEmpty(searchListenersList)) { + searchRequestOperationsListener = new SearchRequestOperationsListener.CompositeListener(searchListenersList, logger); + } else { + searchRequestOperationsListener = null; + } + PipelinedRequest searchRequest; ActionListener listener; try { @@ -462,7 +519,8 @@ private void executeRequest( clusterState, listener, 
searchContext, - searchAsyncActionProvider + searchAsyncActionProvider, + searchRequestOperationsListener ); } else { if (shouldMinimizeRoundtrips(searchRequest)) { @@ -483,7 +541,8 @@ private void executeRequest( clusterState, l, searchContext, - searchAsyncActionProvider + searchAsyncActionProvider, + searchRequestOperationsListener ) ); } else { @@ -533,7 +592,8 @@ private void executeRequest( listener, new SearchResponse.Clusters(totalClusters, successfulClusters, skippedClusters.get()), searchContext, - searchAsyncActionProvider + searchAsyncActionProvider, + searchRequestOperationsListener ); }, listener::onFailure) ); @@ -622,6 +682,7 @@ public void onResponse(SearchResponse searchResponse) { searchResponse.getSuccessfulShards(), searchResponse.getSkippedShards(), timeProvider.buildTookInMillis(), + timeProvider.getPhaseTook(), searchResponse.getShardFailures(), new SearchResponse.Clusters(1, 1, 0), searchResponse.pointInTimeId() @@ -811,7 +872,8 @@ private void executeLocalSearch( ClusterState clusterState, ActionListener listener, SearchContextId searchContext, - SearchAsyncActionProvider searchAsyncActionProvider + SearchAsyncActionProvider searchAsyncActionProvider, + SearchRequestOperationsListener searchRequestOperationsListener ) { executeSearch( (SearchTask) task, @@ -825,7 +887,8 @@ private void executeLocalSearch( listener, SearchResponse.Clusters.EMPTY, searchContext, - searchAsyncActionProvider + searchAsyncActionProvider, + searchRequestOperationsListener ); } @@ -943,7 +1006,8 @@ private void executeSearch( ActionListener listener, SearchResponse.Clusters clusters, @Nullable SearchContextId searchContext, - SearchAsyncActionProvider searchAsyncActionProvider + SearchAsyncActionProvider searchAsyncActionProvider, + SearchRequestOperationsListener searchRequestOperationsListener ) { clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ); // TODO: I think startTime() should become part of ActionRequest and that should be used both for index name @@ -1044,7 +1108,8 @@ private void executeSearch( listener, preFilterSearchShards, threadPool, - clusters + clusters, + searchRequestOperationsListener ).start(); } @@ -1127,15 +1192,30 @@ AbstractSearchAsyncAction asyncSearchAction( ActionListener listener, boolean preFilter, ThreadPool threadPool, - SearchResponse.Clusters clusters + SearchResponse.Clusters clusters, + SearchRequestOperationsListener searchRequestOperationsListener ); } - private List createSearchListenerList() { + private List createSearchListenerList(SearchRequest searchRequest, SearchTimeProvider timeProvider) { final List searchListenersList = new ArrayList<>(); + if (isRequestStatsEnabled) { searchListenersList.add(searchRequestStats); } + + // phase_took is enabled with request param and/or cluster setting + Boolean phaseTookRequestParam = searchRequest.isPhaseTook(); + if (phaseTookRequestParam == null) { // check cluster setting only when request param is undefined + if (clusterService.getClusterSettings().get(TransportSearchAction.SEARCH_PHASE_TOOK_ENABLED)) { + timeProvider.setPhaseTook(true); + searchListenersList.add(timeProvider); + } + } else if (phaseTookRequestParam == true) { + timeProvider.setPhaseTook(true); + searchListenersList.add(timeProvider); + } + return searchListenersList; } @@ -1153,15 +1233,9 @@ private AbstractSearchAsyncAction searchAsyncAction ActionListener listener, boolean preFilter, ThreadPool threadPool, - SearchResponse.Clusters clusters + SearchResponse.Clusters clusters, + 
SearchRequestOperationsListener searchRequestOperationsListener ) { - final List searchListenersList = createSearchListenerList(); - final SearchRequestOperationsListener searchRequestOperationsListener; - if (!CollectionUtils.isEmpty(searchListenersList)) { - searchRequestOperationsListener = new SearchRequestOperationsListener.CompositeListener(searchListenersList, logger); - } else { - searchRequestOperationsListener = null; - } if (preFilter) { return new CanMatchPreFilterSearchPhase( logger, @@ -1192,7 +1266,8 @@ private AbstractSearchAsyncAction searchAsyncAction listener, false, threadPool, - clusters + clusters, + searchRequestOperationsListener ); return new SearchPhase(action.getName()) { @Override diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 4cd3490cffb4c..ad2b89aa3948d 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -375,6 +375,7 @@ public void apply(Settings value, Settings current, Settings previous) { TransportSearchAction.SHARD_COUNT_LIMIT_SETTING, TransportSearchAction.SEARCH_CANCEL_AFTER_TIME_INTERVAL_SETTING, TransportSearchAction.SEARCH_REQUEST_STATS_ENABLED, + TransportSearchAction.SEARCH_PHASE_TOOK_ENABLED, RemoteClusterService.REMOTE_CLUSTER_SKIP_UNAVAILABLE, SniffConnectionStrategy.REMOTE_CONNECTIONS_PER_CLUSTER, RemoteClusterService.REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING, diff --git a/server/src/main/java/org/opensearch/rest/action/search/RestSearchAction.java b/server/src/main/java/org/opensearch/rest/action/search/RestSearchAction.java index ebfd082d974fd..080366e536da1 100644 --- a/server/src/main/java/org/opensearch/rest/action/search/RestSearchAction.java +++ b/server/src/main/java/org/opensearch/rest/action/search/RestSearchAction.java @@ -180,6 +180,12 @@ public static void parseSearchRequest( searchRequest.allowPartialSearchResults(request.paramAsBoolean("allow_partial_search_results", null)); } + if (request.hasParam("phase_took")) { + // only set if we have the parameter passed to override the cluster-level default + // else phaseTook = null + searchRequest.setPhaseTook(request.paramAsBoolean("phase_took", true)); + } + // do not allow 'query_and_fetch' or 'dfs_query_and_fetch' search types // from the REST layer. these modes are an internal optimization and should // not be specified explicitly by the user. 
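Taken together, the REST handler and `createSearchListenerList` implement a simple three-state precedence: an explicit `phase_took` request parameter always wins, and the cluster-level default applies only when the parameter is absent (`null`). Condensed into a sketch (class and method names are illustrative):

```java
/** Sketch: resolve whether phase took-times are recorded for one request. */
public final class PhaseTookPrecedenceSketch {

    static boolean shouldRecordPhaseTook(Boolean requestParam, boolean clusterDefault) {
        // null means "not specified on the request" -> defer to the cluster setting
        return requestParam != null ? requestParam : clusterDefault;
    }

    public static void main(String[] args) {
        System.out.println(shouldRecordPhaseTook(Boolean.TRUE, false)); // true  (param overrides)
        System.out.println(shouldRecordPhaseTook(Boolean.FALSE, true)); // false (param overrides)
        System.out.println(shouldRecordPhaseTook(null, true));          // true  (cluster default)
    }
}
```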
diff --git a/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java b/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java index f628bb3201452..edac50813e191 100644 --- a/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java @@ -688,7 +688,11 @@ private SearchDfsQueryThenFetchAsyncAction createSearchDfsQueryThenFetchAsyncAct ); AtomicReference exception = new AtomicReference<>(); ActionListener listener = ActionListener.wrap(response -> fail("onResponse should not be called"), exception::set); - + TransportSearchAction.SearchTimeProvider timeProvider = new TransportSearchAction.SearchTimeProvider( + 0, + System.nanoTime(), + System::nanoTime + ); return new SearchDfsQueryThenFetchAsyncAction( logger, null, @@ -702,7 +706,7 @@ private SearchDfsQueryThenFetchAsyncAction createSearchDfsQueryThenFetchAsyncAct searchRequest, listener, shardsIter, - null, + timeProvider, null, task, SearchResponse.Clusters.EMPTY, @@ -734,6 +738,11 @@ private SearchQueryThenFetchAsyncAction createSearchQueryThenFetchAsyncAction( ); AtomicReference exception = new AtomicReference<>(); ActionListener listener = ActionListener.wrap(response -> fail("onResponse should not be called"), exception::set); + TransportSearchAction.SearchTimeProvider timeProvider = new TransportSearchAction.SearchTimeProvider( + 0, + System.nanoTime(), + System::nanoTime + ); return new SearchQueryThenFetchAsyncAction( logger, null, @@ -747,7 +756,7 @@ private SearchQueryThenFetchAsyncAction createSearchQueryThenFetchAsyncAction( searchRequest, listener, shardsIter, - null, + timeProvider, null, task, SearchResponse.Clusters.EMPTY, diff --git a/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java b/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java index 25d8c5551880f..cdd0ea863ce37 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java @@ -244,6 +244,7 @@ private SearchRequest mutate(SearchRequest searchRequest) { ); mutators.add(() -> mutation.source(randomValueOtherThan(searchRequest.source(), this::createSearchSourceBuilder))); mutators.add(() -> mutation.setCcsMinimizeRoundtrips(searchRequest.isCcsMinimizeRoundtrips() == false)); + mutators.add(() -> mutation.setPhaseTook(searchRequest.isPhaseTook() == false)); mutators.add( () -> mutation.setCancelAfterTimeInterval( searchRequest.getCancelAfterTimeInterval() != null diff --git a/server/src/test/java/org/opensearch/action/search/SearchResponseTests.java b/server/src/test/java/org/opensearch/action/search/SearchResponseTests.java index 097e922147698..c9e59ab4ea04d 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchResponseTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchResponseTests.java @@ -74,7 +74,9 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Collections; +import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.UUID; import static java.util.Collections.singletonMap; @@ -152,6 +154,11 @@ public SearchResponse createTestItem( Boolean terminatedEarly = randomBoolean() ? 
null : randomBoolean(); int numReducePhases = randomIntBetween(1, 10); long tookInMillis = randomNonNegativeLong(); + Map phaseTookMap = new HashMap<>(); + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + phaseTookMap.put(searchPhaseName.getName(), randomNonNegativeLong()); + } + SearchResponse.PhaseTook phaseTook = new SearchResponse.PhaseTook(phaseTookMap); int totalShards = randomIntBetween(1, Integer.MAX_VALUE); int successfulShards = randomIntBetween(0, totalShards); int skippedShards = randomIntBetween(0, totalShards); @@ -182,6 +189,7 @@ public SearchResponse createTestItem( successfulShards, skippedShards, tookInMillis, + phaseTook, shardSearchFailures, randomBoolean() ? randomClusters() : SearchResponse.Clusters.EMPTY, null @@ -353,6 +361,14 @@ public void testToXContent() { assertEquals(1, searchExtBuilders.size()); } { + Map phaseTookMap = new HashMap<>(); + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + phaseTookMap.put(searchPhaseName.getName(), 0L); + } + phaseTookMap.put(SearchPhaseName.QUERY.getName(), 50L); + phaseTookMap.put(SearchPhaseName.FETCH.getName(), 25L); + phaseTookMap.put(SearchPhaseName.EXPAND.getName(), 30L); + SearchResponse.PhaseTook phaseTook = new SearchResponse.PhaseTook(phaseTookMap); SearchResponse response = new SearchResponse( new InternalSearchResponse( new SearchHits(hits, new TotalHits(100, TotalHits.Relation.EQUAL_TO), 1.5f), @@ -368,13 +384,24 @@ public void testToXContent() { 0, 0, 0, + phaseTook, ShardSearchFailure.EMPTY_ARRAY, - new SearchResponse.Clusters(5, 3, 2) + new SearchResponse.Clusters(5, 3, 2), + null ); StringBuilder expectedString = new StringBuilder(); expectedString.append("{"); { expectedString.append("\"took\":0,"); + expectedString.append("\"phase_took\":"); + { + expectedString.append("{\"dfs_pre_query\":0,"); + expectedString.append("\"query\":50,"); + expectedString.append("\"fetch\":25,"); + expectedString.append("\"dfs_query\":0,"); + expectedString.append("\"expand\":30,"); + expectedString.append("\"can_match\":0},"); + } expectedString.append("\"timed_out\":false,"); expectedString.append("\"_shards\":"); { @@ -477,6 +504,24 @@ public void testToXContentEmptyClusters() throws IOException { assertEquals(0, builder.toString().length()); } + public void testSearchResponsePhaseTookEquals() throws IOException { + SearchResponse.PhaseTook phaseTookA = new SearchResponse.PhaseTook(Map.of("foo", 0L, "bar", 1L)); + SearchResponse.PhaseTook phaseTookB = new SearchResponse.PhaseTook(Map.of("foo", 1L, "bar", 1L)); + SearchResponse.PhaseTook phaseTookC = new SearchResponse.PhaseTook(Map.of("foo", 0L)); + SearchResponse.PhaseTook phaseTookD = new SearchResponse.PhaseTook(Map.of()); + + assertNotEquals(phaseTookA, phaseTookB); + assertNotEquals(phaseTookB, phaseTookA); + assertNotEquals(phaseTookA, phaseTookC); + assertNotEquals(phaseTookC, phaseTookA); + assertNotEquals(phaseTookA, phaseTookD); + assertNotEquals(phaseTookD, phaseTookA); + assertEquals(phaseTookA, phaseTookA); + assertEquals(phaseTookB, phaseTookB); + assertEquals(phaseTookC, phaseTookC); + assertEquals(phaseTookD, phaseTookD); + } + static class DummySearchExtBuilder extends SearchExtBuilder { static ParseField DUMMY_FIELD = new ParseField("dummy"); diff --git a/server/src/test/java/org/opensearch/action/search/SearchTimeProviderTests.java b/server/src/test/java/org/opensearch/action/search/SearchTimeProviderTests.java new file mode 100644 index 0000000000000..f0f1a43e6c21e --- /dev/null +++ 
b/server/src/test/java/org/opensearch/action/search/SearchTimeProviderTests.java
@@ -0,0 +1,54 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.action.search;
+
+import org.opensearch.test.OpenSearchTestCase;
+
+import java.util.concurrent.TimeUnit;
+
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class SearchTimeProviderTests extends OpenSearchTestCase {
+
+    public void testSearchTimeProviderPhaseFailure() {
+        TransportSearchAction.SearchTimeProvider testTimeProvider = new TransportSearchAction.SearchTimeProvider(0, 0, () -> 0);
+        SearchPhaseContext ctx = mock(SearchPhaseContext.class);
+        SearchPhase mockSearchPhase = mock(SearchPhase.class);
+        when(ctx.getCurrentPhase()).thenReturn(mockSearchPhase);
+
+        for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) {
+            when(mockSearchPhase.getSearchPhaseName()).thenReturn(searchPhaseName);
+            testTimeProvider.onPhaseStart(ctx);
+            assertNull(testTimeProvider.getPhaseTookTime(searchPhaseName));
+            testTimeProvider.onPhaseFailure(ctx);
+            assertNull(testTimeProvider.getPhaseTookTime(searchPhaseName));
+        }
+    }
+
+    public void testSearchTimeProviderPhaseEnd() {
+        TransportSearchAction.SearchTimeProvider testTimeProvider = new TransportSearchAction.SearchTimeProvider(0, 0, () -> 0);
+
+        SearchPhaseContext ctx = mock(SearchPhaseContext.class);
+        SearchPhase mockSearchPhase = mock(SearchPhase.class);
+        when(ctx.getCurrentPhase()).thenReturn(mockSearchPhase);
+
+        for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) {
+            when(mockSearchPhase.getSearchPhaseName()).thenReturn(searchPhaseName);
+            long tookTimeInMillis = randomIntBetween(1, 100);
+            testTimeProvider.onPhaseStart(ctx);
+            long startTime = System.nanoTime() - TimeUnit.MILLISECONDS.toNanos(tookTimeInMillis);
+            when(mockSearchPhase.getStartTimeInNanos()).thenReturn(startTime);
+            assertNull(testTimeProvider.getPhaseTookTime(searchPhaseName));
+            testTimeProvider.onPhaseEnd(ctx);
+            assertThat(testTimeProvider.getPhaseTookTime(searchPhaseName), greaterThanOrEqualTo(tookTimeInMillis));
+        }
+    }
+}
diff --git a/test/framework/src/main/java/org/opensearch/search/RandomSearchRequestGenerator.java b/test/framework/src/main/java/org/opensearch/search/RandomSearchRequestGenerator.java
index b942136e1f1e2..74de1e6d96d93 100644
--- a/test/framework/src/main/java/org/opensearch/search/RandomSearchRequestGenerator.java
+++ b/test/framework/src/main/java/org/opensearch/search/RandomSearchRequestGenerator.java
@@ -131,6 +131,9 @@ public static SearchRequest randomSearchRequest(Supplier ra
         if (randomBoolean()) {
             searchRequest.setCancelAfterTimeInterval(TimeValue.parseTimeValue(randomTimeValue(), null, "cancel_after_time_interval"));
         }
+        if (randomBoolean()) {
+            searchRequest.setPhaseTook(randomBoolean());
+        }
         return searchRequest;
     }

From 53be8f377a9de7b16da99def03b5e87f39cce430 Mon Sep 17 00:00:00 2001
From: Marc Handalian
Date: Fri, 13 Oct 2023 14:58:26 -0700
Subject: [PATCH 27/33] Fix broken test testCommitOnCloseThrowsException_decRefStore (#10621)

This test was incorrectly hardcoding a segment file to delete in order to force corruption. If a merge happened to run before the segments were copied, the expected segment no longer existed and the test failed. The test now looks for any .si file and deletes that.
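The fix boils down to selecting an arbitrary `.si` (segment info) file from the store's listing rather than assuming `_0.si` still exists after a possible merge. A plain-Java sketch of that selection over a `String[]` listing, standing in for Lucene's `Directory.listAll()` (file names in the example are hypothetical):

```java
import java.util.Arrays;
import java.util.Optional;

public final class AnySegmentInfoFileSketch {

    /** Returns any segment-info (.si) file present in a directory listing. */
    static Optional<String> anySegmentInfoFile(String[] listing) {
        return Arrays.stream(listing).filter(f -> f.endsWith(".si")).findAny();
    }

    public static void main(String[] args) {
        // After a background merge, "_0.si" may be gone, replaced by e.g. "_1.si".
        String[] listing = { "_1.cfe", "_1.cfs", "_1.si", "segments_2" };
        System.out.println(anySegmentInfoFile(listing).orElseThrow()); // _1.si
    }
}
```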
Signed-off-by: Marc Handalian --- .../opensearch/index/engine/NRTReplicationEngineTests.java | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java b/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java index ee25d3789fb13..09484cd1b5840 100644 --- a/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java +++ b/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java @@ -35,6 +35,7 @@ import java.util.Collection; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicLong; @@ -591,7 +592,9 @@ public void testCommitOnCloseThrowsException_decRefStore() throws Exception { indexOperations(nrtEngine, operations); // wipe the nrt directory initially so we can sync with primary. cleanAndCopySegmentsFromPrimary(nrtEngine); - nrtEngineStore.directory().deleteFile("_0.si"); + final Optional toDelete = Set.of(nrtEngineStore.directory().listAll()).stream().filter(f -> f.endsWith(".si")).findAny(); + assertTrue(toDelete.isPresent()); + nrtEngineStore.directory().deleteFile(toDelete.get()); assertEquals(2, nrtEngineStore.refCount()); nrtEngine.close(); assertEquals(1, nrtEngineStore.refCount()); From 6c1bd487da8ca1794f29302d7a6c6a713f9c6a01 Mon Sep 17 00:00:00 2001 From: Bhumika Saini Date: Sat, 14 Oct 2023 19:46:09 +0530 Subject: [PATCH 28/33] [Remote Store] Add Remote Store backpressure rejection stats to _nodes/stats (#10524) Signed-off-by: Bhumika Saini --- CHANGELOG.md | 1 + .../index/remote/RemoteSegmentStats.java | 51 ++++++++++++++++--- .../remote/RemoteSegmentTransferTracker.java | 3 +- .../cluster/node/stats/NodeStatsTests.java | 1 + .../index/shard/IndexShardTests.java | 4 ++ 5 files changed, 52 insertions(+), 8 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a53d31c7861ff..011e404c3d302 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -90,6 +90,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Dependencies ### Changed +- [Remote Store] Add Remote Store backpressure rejection stats to `_nodes/stats` ([#10524](https://github.com/opensearch-project/OpenSearch/pull/10524)) ### Deprecated diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteSegmentStats.java b/server/src/main/java/org/opensearch/index/remote/RemoteSegmentStats.java index c7863536adf20..5992923a4157b 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteSegmentStats.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteSegmentStats.java @@ -8,6 +8,7 @@ package org.opensearch.index.remote; +import org.opensearch.Version; import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStats; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; @@ -75,6 +76,10 @@ public class RemoteSegmentStats implements Writeable, ToXContentFragment { * Total time spent in downloading segments from remote store */ private long totalDownloadTime; + /** + * Total rejections due to remote store upload backpressure + */ + private long totalRejections; public RemoteSegmentStats() {} @@ -90,6 +95,10 @@ public RemoteSegmentStats(StreamInput in) throws IOException { totalRefreshBytesLag = in.readLong(); totalUploadTime = in.readLong(); totalDownloadTime = in.readLong(); + // TODO: change to V_2_12_0 on main after backport 
to 2.x + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + totalRejections = in.readVLong(); + } } /** @@ -115,6 +124,7 @@ public RemoteSegmentStats(RemoteSegmentTransferTracker.Stats trackerStats) { this.totalRefreshBytesLag = trackerStats.bytesLag; this.totalUploadTime = trackerStats.totalUploadTimeInMs; this.totalDownloadTime = trackerStats.directoryFileTransferTrackerStats.totalTransferTimeInMs; + this.totalRejections = trackerStats.rejectionCount; } // Getter and setters. All are visible for testing @@ -207,6 +217,14 @@ public void addTotalDownloadTime(long totalDownloadTime) { this.totalDownloadTime += totalDownloadTime; } + public long getTotalRejections() { + return totalRejections; + } + + public void addTotalRejections(long totalRejections) { + this.totalRejections += totalRejections; + } + /** * Adds existing stats. Used for stats roll-ups at index or node level * @@ -225,6 +243,7 @@ public void add(RemoteSegmentStats existingStats) { this.totalRefreshBytesLag += existingStats.getTotalRefreshBytesLag(); this.totalUploadTime += existingStats.getTotalUploadTime(); this.totalDownloadTime += existingStats.getTotalDownloadTime(); + this.totalRejections += existingStats.totalRejections; } } @@ -241,18 +260,26 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(totalRefreshBytesLag); out.writeLong(totalUploadTime); out.writeLong(totalDownloadTime); + // TODO: change to V_2_12_0 on main after backport to 2.x + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + out.writeVLong(totalRejections); + } } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(Fields.REMOTE_STORE); + builder.startObject(Fields.UPLOAD); buildUploadStats(builder); - builder.endObject(); + builder.endObject(); // UPLOAD + builder.startObject(Fields.DOWNLOAD); buildDownloadStats(builder); - builder.endObject(); - builder.endObject(); + builder.endObject(); // DOWNLOAD + + builder.endObject(); // REMOTE_STORE + return builder; } @@ -261,13 +288,19 @@ private void buildUploadStats(XContentBuilder builder) throws IOException { builder.humanReadableField(Fields.STARTED_BYTES, Fields.STARTED, new ByteSizeValue(uploadBytesStarted)); builder.humanReadableField(Fields.SUCCEEDED_BYTES, Fields.SUCCEEDED, new ByteSizeValue(uploadBytesSucceeded)); builder.humanReadableField(Fields.FAILED_BYTES, Fields.FAILED, new ByteSizeValue(uploadBytesFailed)); - builder.endObject(); + builder.endObject(); // TOTAL_UPLOAD_SIZE + builder.startObject(Fields.REFRESH_SIZE_LAG); builder.humanReadableField(Fields.TOTAL_BYTES, Fields.TOTAL, new ByteSizeValue(totalRefreshBytesLag)); builder.humanReadableField(Fields.MAX_BYTES, Fields.MAX, new ByteSizeValue(maxRefreshBytesLag)); - builder.endObject(); + builder.endObject(); // REFRESH_SIZE_LAG + builder.humanReadableField(Fields.MAX_REFRESH_TIME_LAG_IN_MILLIS, Fields.MAX_REFRESH_TIME_LAG, new TimeValue(maxRefreshTimeLag)); builder.humanReadableField(Fields.TOTAL_TIME_SPENT_IN_MILLIS, Fields.TOTAL_TIME_SPENT, new TimeValue(totalUploadTime)); + + builder.startObject(Fields.PRESSURE); + builder.field(Fields.TOTAL_REJECTIONS, totalRejections); + builder.endObject(); // PRESSURE } private void buildDownloadStats(XContentBuilder builder) throws IOException { @@ -300,6 +333,8 @@ static final class Fields { static final String MAX_BYTES = "max_bytes"; static final String TOTAL_TIME_SPENT = "total_time_spent"; static final String TOTAL_TIME_SPENT_IN_MILLIS = "total_time_spent_in_millis"; + static final String 
PRESSURE = "pressure"; + static final String TOTAL_REJECTIONS = "total_rejections"; } @Override @@ -318,7 +353,8 @@ public boolean equals(Object o) { && maxRefreshBytesLag == that.maxRefreshBytesLag && totalRefreshBytesLag == that.totalRefreshBytesLag && totalUploadTime == that.totalUploadTime - && totalDownloadTime == that.totalDownloadTime; + && totalDownloadTime == that.totalDownloadTime + && totalRejections == that.totalRejections; } @Override @@ -334,7 +370,8 @@ public int hashCode() { maxRefreshBytesLag, totalRefreshBytesLag, totalUploadTime, - totalDownloadTime + totalDownloadTime, + totalRejections ); } } diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteSegmentTransferTracker.java b/server/src/main/java/org/opensearch/index/remote/RemoteSegmentTransferTracker.java index 05081180bb179..2a703f17aa953 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteSegmentTransferTracker.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteSegmentTransferTracker.java @@ -267,7 +267,8 @@ public long getRejectionCount() { return rejectionCount.get(); } - void incrementRejectionCount() { + /** public only for testing **/ + public void incrementRejectionCount() { rejectionCount.incrementAndGet(); } diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java index e3f16463a5328..e0b35c69cc3c0 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java @@ -815,6 +815,7 @@ private static NodeIndicesStats getNodeIndicesStats(boolean remoteStoreStats) { remoteSegmentStats.setMaxRefreshTimeLag(2L); remoteSegmentStats.addTotalUploadTime(20L); remoteSegmentStats.addTotalDownloadTime(20L); + remoteSegmentStats.addTotalRejections(5L); RemoteTranslogStats remoteTranslogStats = indicesStats.getTranslog().getRemoteTranslogStats(); RemoteTranslogStats otherRemoteTranslogStats = new RemoteTranslogStats(getRandomRemoteTranslogTransferTrackerStats()); diff --git a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java index b2eb41828a4df..9ef9bec01cb38 100644 --- a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java @@ -4910,6 +4910,8 @@ private void populateSampleRemoteSegmentStats(RemoteSegmentTransferTracker track tracker.addUploadBytesStarted(30L); tracker.addUploadBytesSucceeded(10L); tracker.addUploadBytesFailed(10L); + tracker.incrementRejectionCount(); + tracker.incrementRejectionCount(); } private void populateSampleRemoteTranslogStats(RemoteTranslogTransferTracker tracker) { @@ -4943,5 +4945,7 @@ private static void assertRemoteSegmentStats( assertEquals(remoteSegmentTransferTracker.getUploadBytesStarted(), remoteSegmentStats.getUploadBytesStarted()); assertEquals(remoteSegmentTransferTracker.getUploadBytesSucceeded(), remoteSegmentStats.getUploadBytesSucceeded()); assertEquals(remoteSegmentTransferTracker.getUploadBytesFailed(), remoteSegmentStats.getUploadBytesFailed()); + assertTrue(remoteSegmentStats.getTotalRejections() > 0); + assertEquals(remoteSegmentTransferTracker.getRejectionCount(), remoteSegmentStats.getTotalRejections()); } } From 01a6ffdf10bfc49341016b244b56bdb1f79e0d11 Mon Sep 17 00:00:00 2001 From: Louis Chu 
Date: Sat, 14 Oct 2023 15:05:37 -0400 Subject: [PATCH 29/33] Update release note and changelog (#10609) Signed-off-by: Louis Chu --- CHANGELOG.md | 15 ++++++++++++-- .../opensearch.release-notes-2.11.0.md | 20 ++++++------------- 2 files changed, 19 insertions(+), 16 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 011e404c3d302..644fb05900ceb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,9 +11,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add events correlation engine plugin ([#6854](https://github.com/opensearch-project/OpenSearch/issues/6854)) - Introduce new dynamic cluster setting to control slice computation for concurrent segment search ([#9107](https://github.com/opensearch-project/OpenSearch/pull/9107)) - Implement on behalf of token passing for extensions ([#8679](https://github.com/opensearch-project/OpenSearch/pull/8679)) -- Implement Visitor Design pattern in QueryBuilder to enable the capability to traverse through the complex QueryBuilder tree. ([#10110](https://github.com/opensearch-project/OpenSearch/pull/10110)) - Provide service accounts tokens to extensions ([#9618](https://github.com/opensearch-project/OpenSearch/pull/9618)) -- Configurable merge policy for index with an option to choose from LogByteSize and Tiered merge policy ([#9992](https://github.com/opensearch-project/OpenSearch/pull/9992)) ### Dependencies - Bump `log4j-core` from 2.18.0 to 2.19.0 @@ -88,8 +86,18 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Per request phase latency ([#10351](https://github.com/opensearch-project/OpenSearch/issues/10351)) ### Dependencies +- Bump `com.google.api.grpc:proto-google-common-protos` from 2.10.0 to 2.25.1 ([#10208](https://github.com/opensearch-project/OpenSearch/pull/10208), [#10298](https://github.com/opensearch-project/OpenSearch/pull/10298)) +- Bump Lucene from 9.7.0 to 9.8.0 ([10276](https://github.com/opensearch-project/OpenSearch/pull/10276)) +- Bump `com.netflix.nebula.ospackage-base` from 11.4.0 to 11.5.0 ([#10295](https://github.com/opensearch-project/OpenSearch/pull/10295)) +- Bump `org.bouncycastle:bc-fips` from 1.0.2.3 to 1.0.2.4 ([#10297](https://github.com/opensearch-project/OpenSearch/pull/10297)) +- Bump `org.apache.zookeeper:zookeeper` from 3.9.0 to 3.9.1 ([#10506](https://github.com/opensearch-project/OpenSearch/pull/10506)) +- Bump `de.thetaphi:forbiddenapis` from 3.5.1 to 3.6 ([#10508](https://github.com/opensearch-project/OpenSearch/pull/10508)) +- Bump `commons-io:commons-io` from 2.13.0 to 2.14.0 ([#10294](https://github.com/opensearch-project/OpenSearch/pull/10294)) ### Changed +- Mute the query profile IT with concurrent execution ([#9840](https://github.com/opensearch-project/OpenSearch/pull/9840)) +- Force merge with `only_expunge_deletes` honors max segment size ([#10036](https://github.com/opensearch-project/OpenSearch/pull/10036)) +- Add the means to extract the contextual properties from HttpChannel, TcpCChannel and TrasportChannel without excessive typecasting ([#10562](https://github.com/opensearch-project/OpenSearch/pull/10562)) - [Remote Store] Add Remote Store backpressure rejection stats to `_nodes/stats` ([#10524](https://github.com/opensearch-project/OpenSearch/pull/10524)) ### Deprecated @@ -97,6 +105,9 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Removed ### Fixed +- Fix class_cast_exception when passing int to _version and other metadata fields in ingest simulate API 
([#10101](https://github.com/opensearch-project/OpenSearch/pull/10101)) +- Fix Segment Replication ShardLockObtainFailedException bug during index corruption ([10370](https://github.com/opensearch-project/OpenSearch/pull/10370)) +- Fix some test methods in SimulatePipelineRequestParsingTests never run and fix test failure ([#10496](https://github.com/opensearch-project/OpenSearch/pull/10496)) ### Security diff --git a/release-notes/opensearch.release-notes-2.11.0.md b/release-notes/opensearch.release-notes-2.11.0.md index d7e9182f2a656..7ebf1b433c7c6 100644 --- a/release-notes/opensearch.release-notes-2.11.0.md +++ b/release-notes/opensearch.release-notes-2.11.0.md @@ -8,13 +8,16 @@ - Async blob read support for S3 plugin ([#9694](https://github.com/opensearch-project/OpenSearch/pull/9694)) - [Telemetry-Otel] Added support for OtlpGrpcSpanExporter exporter ([#9666](https://github.com/opensearch-project/OpenSearch/pull/9666)) - Async blob read support for encrypted containers ([#10131](https://github.com/opensearch-project/OpenSearch/pull/10131)) +- Implement Visitor Design pattern in QueryBuilder to enable the capability to traverse through the complex QueryBuilder tree. ([#10110](https://github.com/opensearch-project/OpenSearch/pull/10110)) - Add capability to restrict async durability mode for remote indexes ([#10189](https://github.com/opensearch-project/OpenSearch/pull/10189)) - Add Doc Status Counter for Indexing Engine ([#4562](https://github.com/opensearch-project/OpenSearch/issues/4562)) - Add unreferenced file cleanup count to merge stats ([#10204](https://github.com/opensearch-project/OpenSearch/pull/10204)) +- Configurable merge policy for index with an option to choose from LogByteSize and Tiered merge policy ([#9992](https://github.com/opensearch-project/OpenSearch/pull/9992)) - [Remote Store] Add support to restrict creation & deletion if system repository and mutation of immutable settings of system repository ([#9839](https://github.com/opensearch-project/OpenSearch/pull/9839)) - Improve compressed request handling ([#10261](https://github.com/opensearch-project/OpenSearch/pull/10261)) ### Dependencies +- Bump JNA version from 5.5 to 5.13 ([#9963](https://github.com/opensearch-project/OpenSearch/pull/9963)) - Bump `peter-evans/create-or-update-comment` from 2 to 3 ([#9575](https://github.com/opensearch-project/OpenSearch/pull/9575)) - Bump `actions/checkout` from 2 to 4 ([#9968](https://github.com/opensearch-project/OpenSearch/pull/9968)) - Bump OpenTelemetry from 1.26.0 to 1.30.1 ([#9950](https://github.com/opensearch-project/OpenSearch/pull/9950)) @@ -25,30 +28,22 @@ - Bump `com.zaxxer:SparseBitSet` from 1.2 to 1.3 ([#10098](https://github.com/opensearch-project/OpenSearch/pull/10098)) - Bump `tibdex/github-app-token` from 1.5.0 to 2.1.0 ([#10125](https://github.com/opensearch-project/OpenSearch/pull/10125)) - Bump `org.wiremock:wiremock-standalone` from 2.35.0 to 3.1.0 ([#9752](https://github.com/opensearch-project/OpenSearch/pull/9752)) +- Bump `org.eclipse.jgit` from 6.5.0 to 6.7.0 ([#10147](https://github.com/opensearch-project/OpenSearch/pull/10147)) +- Bump `codecov/codecov-action` from 2 to 3 ([#10209](https://github.com/opensearch-project/OpenSearch/pull/10209)) - Bump `com.google.http-client:google-http-client-jackson2` from 1.43.2 to 1.43.3 ([#10126](https://github.com/opensearch-project/OpenSearch/pull/10126)) - Bump `org.xerial.snappy:snappy-java` from 1.1.10.3 to 1.1.10.5 ([#10206](https://github.com/opensearch-project/OpenSearch/pull/10206), 
[#10299](https://github.com/opensearch-project/OpenSearch/pull/10299)) -- Bump `com.google.api.grpc:proto-google-common-protos` from 2.10.0 to 2.25.1 ([#10208](https://github.com/opensearch-project/OpenSearch/pull/10208), [#10298](https://github.com/opensearch-project/OpenSearch/pull/10298)) -- Bump `codecov/codecov-action` from 2 to 3 ([#10209](https://github.com/opensearch-project/OpenSearch/pull/10209)) - Bump `org.bouncycastle:bcpkix-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` - Bump `org.bouncycastle:bcprov-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` - Bump `org.bouncycastle:bcmail-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` -- Bump Lucene from 9.7.0 to 9.8.0 ([10276](https://github.com/opensearch-project/OpenSearch/pull/10276)) -- Bump `com.netflix.nebula.ospackage-base` from 11.4.0 to 11.5.0 ([#10295](https://github.com/opensearch-project/OpenSearch/pull/10295)) - Bump asm from 9.5 to 9.6 ([#10302](https://github.com/opensearch-project/OpenSearch/pull/10302)) - Bump netty from 4.1.97.Final to 4.1.99.Final ([#10303](https://github.com/opensearch-project/OpenSearch/pull/10303)) - Bump `peter-evans/create-pull-request` from 3 to 5 ([#10301](https://github.com/opensearch-project/OpenSearch/pull/10301)) - Bump `org.apache.avro:avro` from 1.11.2 to 1.11.3 ([#10210](https://github.com/opensearch-project/OpenSearch/pull/10210)) -- Bump `org.bouncycastle:bc-fips` from 1.0.2.3 to 1.0.2.4 ([#10297](https://github.com/opensearch-project/OpenSearch/pull/10297)) -- Bump `org.apache.zookeeper:zookeeper` from 3.9.0 to 3.9.1 ([#10506](https://github.com/opensearch-project/OpenSearch/pull/10506)) -- Bump `de.thetaphi:forbiddenapis` from 3.5.1 to 3.6 ([#10508](https://github.com/opensearch-project/OpenSearch/pull/10508)) -- Bump `commons-io:commons-io` from 2.13.0 to 2.14.0 ([#10294](https://github.com/opensearch-project/OpenSearch/pull/10294)) - Bump `netty` from 4.1.99.Final to 4.1.100.Final ([#10564](https://github.com/opensearch-project/OpenSearch/pull/10564)) ### Changed - Add instrumentation in rest and network layer. ([#9415](https://github.com/opensearch-project/OpenSearch/pull/9415)) - Allow parameterization of tests with OpenSearchIntegTestCase.SuiteScopeTestCase annotation ([#9916](https://github.com/opensearch-project/OpenSearch/pull/9916)) -- Mute the query profile IT with concurrent execution ([#9840](https://github.com/opensearch-project/OpenSearch/pull/9840)) -- Force merge with `only_expunge_deletes` honors max segment size ([#10036](https://github.com/opensearch-project/OpenSearch/pull/10036)) - Add instrumentation in transport service. ([#10042](https://github.com/opensearch-project/OpenSearch/pull/10042)) - [Tracing Framework] Add support for SpanKind. ([#10122](https://github.com/opensearch-project/OpenSearch/pull/10122)) - Pass parent filter to inner query in nested query ([#10246](https://github.com/opensearch-project/OpenSearch/pull/10246)) @@ -58,7 +53,6 @@ - [Remote Store] Add support to reload repository metadata inplace ([#9569](https://github.com/opensearch-project/OpenSearch/pull/9569)) - [Metrics Framework] Add Metrics framework. 
([#10241](https://github.com/opensearch-project/OpenSearch/pull/10241)) - Updating the separator for RemoteStoreLockManager since underscore is allowed in base64UUID url charset ([#10379](https://github.com/opensearch-project/OpenSearch/pull/10379)) -- Add the means to extract the contextual properties from HttpChannel, TcpCChannel and TrasportChannel without excessive typecasting ([#10562](https://github.com/opensearch-project/OpenSearch/pull/10562)) ### Removed - Remove spurious SGID bit on directories ([#9447](https://github.com/opensearch-project/OpenSearch/pull/9447)) @@ -68,8 +62,6 @@ - Fix broken backward compatibility from 2.7 for IndexSorted field indices ([#10045](https://github.com/opensearch-project/OpenSearch/pull/10045)) - Fix concurrent search NPE when track_total_hits, terminate_after and size=0 are used ([#10082](https://github.com/opensearch-project/OpenSearch/pull/10082)) - Fix remove ingest processor handing ignore_missing parameter not correctly ([10089](https://github.com/opensearch-project/OpenSearch/pull/10089)) -- Fix class_cast_exception when passing int to _version and other metadata fields in ingest simulate API ([#10101](https://github.com/opensearch-project/OpenSearch/pull/10101)) -- Fix circular dependency in Settings initialization ([10194](https://github.com/opensearch-project/OpenSearch/pull/10194)) - Fix registration and initialization of multiple extensions ([10256](https://github.com/opensearch-project/OpenSearch/pull/10256)) +- Fix circular dependency in Settings initialization ([10194](https://github.com/opensearch-project/OpenSearch/pull/10194)) - Fix Segment Replication ShardLockObtainFailedException bug during index corruption ([10370](https://github.com/opensearch-project/OpenSearch/pull/10370)) -- Fix some test methods in SimulatePipelineRequestParsingTests never run and fix test failure ([#10496](https://github.com/opensearch-project/OpenSearch/pull/10496)) From ead7ade0b78628defdd28b09ddaf1d276966c619 Mon Sep 17 00:00:00 2001 From: Bhumika Saini Date: Mon, 16 Oct 2023 12:00:54 +0530 Subject: [PATCH 30/33] Update version check (#10630) Signed-off-by: Bhumika Saini --- .../org/opensearch/index/remote/RemoteSegmentStats.java | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteSegmentStats.java b/server/src/main/java/org/opensearch/index/remote/RemoteSegmentStats.java index 5992923a4157b..4228ec60c4524 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteSegmentStats.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteSegmentStats.java @@ -95,8 +95,7 @@ public RemoteSegmentStats(StreamInput in) throws IOException { totalRefreshBytesLag = in.readLong(); totalUploadTime = in.readLong(); totalDownloadTime = in.readLong(); - // TODO: change to V_2_12_0 on main after backport to 2.x - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { totalRejections = in.readVLong(); } } @@ -260,8 +259,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(totalRefreshBytesLag); out.writeLong(totalUploadTime); out.writeLong(totalDownloadTime); - // TODO: change to V_2_12_0 on main after backport to 2.x - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { out.writeVLong(totalRejections); } } From 368d35abb398b7e379db281358529e4cb689ae05 Mon Sep 17 00:00:00 2001 From: Bharathwaj G Date: Mon, 16 Oct 2023 17:01:57 +0530 Subject: [PATCH 31/33] 
Add resource usage trackers and resource usage collector service (#9890) --------- Signed-off-by: Bharathwaj G --- CHANGELOG.md | 1 + .../admin/cluster/node/stats/NodeStats.java | 23 ++- .../cluster/node/stats/NodesStatsRequest.java | 3 +- .../node/stats/TransportNodesStatsAction.java | 3 +- .../stats/TransportClusterStatsAction.java | 1 + .../common/settings/ClusterSettings.java | 5 + .../main/java/org/opensearch/node/Node.java | 25 ++- .../node/NodeResourceUsageStats.java | 81 ++++++++ .../java/org/opensearch/node/NodeService.java | 9 +- .../node/NodesResourceUsageStats.java | 69 +++++++ .../node/ResourceUsageCollectorService.java | 160 +++++++++++++++ .../tracker/AbstractAverageUsageTracker.java | 100 +++++++++ .../tracker/AverageCpuUsageTracker.java | 38 ++++ .../tracker/AverageMemoryUsageTracker.java | 42 ++++ .../tracker/NodeResourceUsageTracker.java | 118 +++++++++++ .../tracker/ResourceTrackerSettings.java | 90 +++++++++ .../node/resource/tracker/package-info.java | 12 ++ .../cluster/node/stats/NodeStatsTests.java | 45 +++++ .../opensearch/cluster/DiskUsageTests.java | 6 + .../ResourceUsageCollectorServiceTests.java | 190 ++++++++++++++++++ .../tracker/AverageUsageTrackerTests.java | 99 +++++++++ .../NodeResourceUsageTrackerTests.java | 96 +++++++++ .../MockInternalClusterInfoService.java | 1 + .../opensearch/test/InternalTestCluster.java | 1 + 24 files changed, 1211 insertions(+), 7 deletions(-) create mode 100644 server/src/main/java/org/opensearch/node/NodeResourceUsageStats.java create mode 100644 server/src/main/java/org/opensearch/node/NodesResourceUsageStats.java create mode 100644 server/src/main/java/org/opensearch/node/ResourceUsageCollectorService.java create mode 100644 server/src/main/java/org/opensearch/node/resource/tracker/AbstractAverageUsageTracker.java create mode 100644 server/src/main/java/org/opensearch/node/resource/tracker/AverageCpuUsageTracker.java create mode 100644 server/src/main/java/org/opensearch/node/resource/tracker/AverageMemoryUsageTracker.java create mode 100644 server/src/main/java/org/opensearch/node/resource/tracker/NodeResourceUsageTracker.java create mode 100644 server/src/main/java/org/opensearch/node/resource/tracker/ResourceTrackerSettings.java create mode 100644 server/src/main/java/org/opensearch/node/resource/tracker/package-info.java create mode 100644 server/src/test/java/org/opensearch/node/ResourceUsageCollectorServiceTests.java create mode 100644 server/src/test/java/org/opensearch/node/resource/tracker/AverageUsageTrackerTests.java create mode 100644 server/src/test/java/org/opensearch/node/resource/tracker/NodeResourceUsageTrackerTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 644fb05900ceb..2211f59574718 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Introduce new dynamic cluster setting to control slice computation for concurrent segment search ([#9107](https://github.com/opensearch-project/OpenSearch/pull/9107)) - Implement on behalf of token passing for extensions ([#8679](https://github.com/opensearch-project/OpenSearch/pull/8679)) - Provide service accounts tokens to extensions ([#9618](https://github.com/opensearch-project/OpenSearch/pull/9618)) +- [Admission control] Add Resource usage collector service and resource usage tracker ([#9890](https://github.com/opensearch-project/OpenSearch/pull/9890)) ### Dependencies - Bump `log4j-core` from 2.18.0 to 2.19.0 diff --git 
a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java index dd36b3b8db3ab..69efea186d927 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java @@ -56,6 +56,7 @@ import org.opensearch.monitor.os.OsStats; import org.opensearch.monitor.process.ProcessStats; import org.opensearch.node.AdaptiveSelectionStats; +import org.opensearch.node.NodesResourceUsageStats; import org.opensearch.script.ScriptCacheStats; import org.opensearch.script.ScriptStats; import org.opensearch.search.backpressure.stats.SearchBackpressureStats; @@ -142,6 +143,9 @@ public class NodeStats extends BaseNodeResponse implements ToXContentFragment { @Nullable private SearchPipelineStats searchPipelineStats; + @Nullable + private NodesResourceUsageStats resourceUsageStats; + public NodeStats(StreamInput in) throws IOException { super(in); timestamp = in.readVLong(); @@ -198,6 +202,11 @@ public NodeStats(StreamInput in) throws IOException { } else { searchPipelineStats = null; } + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { // make it 2.12 when we backport + resourceUsageStats = in.readOptionalWriteable(NodesResourceUsageStats::new); + } else { + resourceUsageStats = null; + } } public NodeStats( @@ -216,6 +225,7 @@ public NodeStats( @Nullable DiscoveryStats discoveryStats, @Nullable IngestStats ingestStats, @Nullable AdaptiveSelectionStats adaptiveSelectionStats, + @Nullable NodesResourceUsageStats resourceUsageStats, @Nullable ScriptCacheStats scriptCacheStats, @Nullable IndexingPressureStats indexingPressureStats, @Nullable ShardIndexingPressureStats shardIndexingPressureStats, @@ -241,6 +251,7 @@ public NodeStats( this.discoveryStats = discoveryStats; this.ingestStats = ingestStats; this.adaptiveSelectionStats = adaptiveSelectionStats; + this.resourceUsageStats = resourceUsageStats; this.scriptCacheStats = scriptCacheStats; this.indexingPressureStats = indexingPressureStats; this.shardIndexingPressureStats = shardIndexingPressureStats; @@ -344,6 +355,11 @@ public AdaptiveSelectionStats getAdaptiveSelectionStats() { return adaptiveSelectionStats; } + @Nullable + public NodesResourceUsageStats getResourceUsageStats() { + return resourceUsageStats; + } + @Nullable public ScriptCacheStats getScriptCacheStats() { return scriptCacheStats; @@ -430,6 +446,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_2_9_0)) { out.writeOptionalWriteable(searchPipelineStats); } + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { // make it 2.12 when we backport + out.writeOptionalWriteable(resourceUsageStats); + } } @Override @@ -520,7 +539,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (getSearchPipelineStats() != null) { getSearchPipelineStats().toXContent(builder, params); } - + if (getResourceUsageStats() != null) { + getResourceUsageStats().toXContent(builder, params); + } return builder; } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java index b0caa469033eb..99c9fb2d1e26a 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java +++ 
b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java @@ -213,7 +213,8 @@ public enum Metric { WEIGHTED_ROUTING_STATS("weighted_routing"), FILE_CACHE_STATS("file_cache"), TASK_CANCELLATION("task_cancellation"), - SEARCH_PIPELINE("search_pipeline"); + SEARCH_PIPELINE("search_pipeline"), + RESOURCE_USAGE_STATS("resource_usage_stats"); private String metricName; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java index 615abbaef845d..204157236a282 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java @@ -124,7 +124,8 @@ protected NodeStats nodeOperation(NodeStatsRequest nodeStatsRequest) { NodesStatsRequest.Metric.WEIGHTED_ROUTING_STATS.containedIn(metrics), NodesStatsRequest.Metric.FILE_CACHE_STATS.containedIn(metrics), NodesStatsRequest.Metric.TASK_CANCELLATION.containedIn(metrics), - NodesStatsRequest.Metric.SEARCH_PIPELINE.containedIn(metrics) + NodesStatsRequest.Metric.SEARCH_PIPELINE.containedIn(metrics), + NodesStatsRequest.Metric.RESOURCE_USAGE_STATS.containedIn(metrics) ); } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java index 18098bc31432f..d8323e209be23 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java @@ -168,6 +168,7 @@ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq false, false, false, + false, false ); List shardsStats = new ArrayList<>(); diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index ad2b89aa3948d..bb0910c4e5f9c 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -130,6 +130,7 @@ import org.opensearch.node.Node.DiscoverySettings; import org.opensearch.node.NodeRoleSettings; import org.opensearch.node.remotestore.RemoteStoreNodeService; +import org.opensearch.node.resource.tracker.ResourceTrackerSettings; import org.opensearch.persistent.PersistentTasksClusterService; import org.opensearch.persistent.decider.EnableAssignmentDecider; import org.opensearch.plugins.PluginsService; @@ -655,6 +656,10 @@ public void apply(Settings value, Settings current, Settings previous) { SegmentReplicationPressureService.MAX_REPLICATION_LIMIT_STALE_REPLICA_SETTING, SegmentReplicationPressureService.MAX_ALLOWED_STALE_SHARDS, + // Settings related to resource trackers + ResourceTrackerSettings.GLOBAL_CPU_USAGE_AC_WINDOW_DURATION_SETTING, + ResourceTrackerSettings.GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING, + // Settings related to Searchable Snapshots Node.NODE_SEARCH_CACHE_SIZE_SETTING, FileCache.DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING, diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 5b3b064a47c66..c456f01135dee 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ 
b/server/src/main/java/org/opensearch/node/Node.java @@ -167,6 +167,7 @@ import org.opensearch.monitor.fs.FsProbe; import org.opensearch.monitor.jvm.JvmInfo; import org.opensearch.node.remotestore.RemoteStoreNodeService; +import org.opensearch.node.resource.tracker.NodeResourceUsageTracker; import org.opensearch.persistent.PersistentTasksClusterService; import org.opensearch.persistent.PersistentTasksExecutor; import org.opensearch.persistent.PersistentTasksExecutorRegistry; @@ -805,7 +806,6 @@ protected Node( remoteStoreStatsTrackerFactory, recoverySettings ); - final AliasValidator aliasValidator = new AliasValidator(); final ShardLimitValidator shardLimitValidator = new ShardLimitValidator(settings, clusterService, systemIndices); @@ -1070,6 +1070,16 @@ protected Node( transportService.getTaskManager(), taskCancellationMonitoringSettings ); + final NodeResourceUsageTracker nodeResourceUsageTracker = new NodeResourceUsageTracker( + threadPool, + settings, + clusterService.getClusterSettings() + ); + final ResourceUsageCollectorService resourceUsageCollectorService = new ResourceUsageCollectorService( + nodeResourceUsageTracker, + clusterService, + threadPool + ); this.nodeService = new NodeService( settings, threadPool, @@ -1091,7 +1101,8 @@ protected Node( searchBackpressureService, searchPipelineService, fileCache, - taskCancellationMonitoringService + taskCancellationMonitoringService, + resourceUsageCollectorService ); final SearchService searchService = newSearchService( @@ -1212,6 +1223,8 @@ protected Node( b.bind(RerouteService.class).toInstance(rerouteService); b.bind(ShardLimitValidator.class).toInstance(shardLimitValidator); b.bind(FsHealthService.class).toInstance(fsHealthService); + b.bind(NodeResourceUsageTracker.class).toInstance(nodeResourceUsageTracker); + b.bind(ResourceUsageCollectorService.class).toInstance(resourceUsageCollectorService); b.bind(SystemIndices.class).toInstance(systemIndices); b.bind(IdentityService.class).toInstance(identityService); b.bind(Tracer.class).toInstance(tracer); @@ -1328,6 +1341,8 @@ public Node start() throws NodeValidationException { injector.getInstance(RepositoriesService.class).start(); injector.getInstance(SearchService.class).start(); injector.getInstance(FsHealthService.class).start(); + injector.getInstance(NodeResourceUsageTracker.class).start(); + injector.getInstance(ResourceUsageCollectorService.class).start(); nodeService.getMonitorService().start(); nodeService.getSearchBackpressureService().start(); nodeService.getTaskCancellationMonitoringService().start(); @@ -1490,6 +1505,8 @@ private Node stop() { injector.getInstance(ClusterService.class).stop(); injector.getInstance(NodeConnectionsService.class).stop(); injector.getInstance(FsHealthService.class).stop(); + injector.getInstance(NodeResourceUsageTracker.class).stop(); + injector.getInstance(ResourceUsageCollectorService.class).stop(); nodeService.getMonitorService().stop(); nodeService.getSearchBackpressureService().stop(); injector.getInstance(GatewayService.class).stop(); @@ -1553,6 +1570,10 @@ public synchronized void close() throws IOException { toClose.add(nodeService.getSearchBackpressureService()); toClose.add(() -> stopWatch.stop().start("fsHealth")); toClose.add(injector.getInstance(FsHealthService.class)); + toClose.add(() -> stopWatch.stop().start("resource_usage_tracker")); + toClose.add(injector.getInstance(NodeResourceUsageTracker.class)); + toClose.add(() -> stopWatch.stop().start("resource_usage_collector")); + 
toClose.add(injector.getInstance(ResourceUsageCollectorService.class)); toClose.add(() -> stopWatch.stop().start("gateway")); toClose.add(injector.getInstance(GatewayService.class)); toClose.add(() -> stopWatch.stop().start("search")); diff --git a/server/src/main/java/org/opensearch/node/NodeResourceUsageStats.java b/server/src/main/java/org/opensearch/node/NodeResourceUsageStats.java new file mode 100644 index 0000000000000..6ef66d4ac1914 --- /dev/null +++ b/server/src/main/java/org/opensearch/node/NodeResourceUsageStats.java @@ -0,0 +1,81 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.node; + +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; + +import java.io.IOException; +import java.util.Locale; + +/** + * This represents the resource usage stats of a node along with the timestamp at which the stats object was created + * in the respective node + */ +public class NodeResourceUsageStats implements Writeable { + final String nodeId; + long timestamp; + double cpuUtilizationPercent; + double memoryUtilizationPercent; + + public NodeResourceUsageStats(String nodeId, long timestamp, double memoryUtilizationPercent, double cpuUtilizationPercent) { + this.nodeId = nodeId; + this.timestamp = timestamp; + this.cpuUtilizationPercent = cpuUtilizationPercent; + this.memoryUtilizationPercent = memoryUtilizationPercent; + } + + public NodeResourceUsageStats(StreamInput in) throws IOException { + this.nodeId = in.readString(); + this.timestamp = in.readLong(); + this.cpuUtilizationPercent = in.readDouble(); + this.memoryUtilizationPercent = in.readDouble(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(this.nodeId); + out.writeLong(this.timestamp); + out.writeDouble(this.cpuUtilizationPercent); + out.writeDouble(this.memoryUtilizationPercent); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("NodeResourceUsageStats["); + sb.append(nodeId).append("]("); + sb.append("Timestamp: ").append(timestamp); + sb.append(", CPU utilization percent: ").append(String.format(Locale.ROOT, "%.1f", cpuUtilizationPercent)); + sb.append(", Memory utilization percent: ").append(String.format(Locale.ROOT, "%.1f", memoryUtilizationPercent)); + sb.append(")"); + return sb.toString(); + } + + NodeResourceUsageStats(NodeResourceUsageStats nodeResourceUsageStats) { + this( + nodeResourceUsageStats.nodeId, + nodeResourceUsageStats.timestamp, + nodeResourceUsageStats.memoryUtilizationPercent, + nodeResourceUsageStats.cpuUtilizationPercent + ); + } + + public double getMemoryUtilizationPercent() { + return memoryUtilizationPercent; + } + + public double getCpuUtilizationPercent() { + return cpuUtilizationPercent; + } + + public long getTimestamp() { + return timestamp; + } +} diff --git a/server/src/main/java/org/opensearch/node/NodeService.java b/server/src/main/java/org/opensearch/node/NodeService.java index 2688b894cb9a7..9bb07080fa717 100644 --- a/server/src/main/java/org/opensearch/node/NodeService.java +++ b/server/src/main/java/org/opensearch/node/NodeService.java @@ -83,6 +83,7 @@ public class NodeService implements Closeable { private final ScriptService scriptService; private final HttpServerTransport httpServerTransport; 
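// The collector is wired as a constructor dependency of NodeService (bound in
// Node above) and surfaced through stats(...) behind the new resourceUsageStats
// flag, mirroring how responseCollectorService backs the adaptive-selection
// stats. A request shaped like GET /_nodes/stats/resource_usage_stats (metric
// name from NodesStatsRequest above; URL shape assumed) then renders the
// per-node "resource_usage_stats" object built by NodesResourceUsageStats#toXContent (below).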
private final ResponseCollectorService responseCollectorService; + private final ResourceUsageCollectorService resourceUsageCollectorService; private final SearchTransportService searchTransportService; private final IndexingPressureService indexingPressureService; private final AggregationUsageService aggregationUsageService; @@ -114,7 +115,8 @@ public class NodeService implements Closeable { SearchBackpressureService searchBackpressureService, SearchPipelineService searchPipelineService, FileCache fileCache, - TaskCancellationMonitoringService taskCancellationMonitoringService + TaskCancellationMonitoringService taskCancellationMonitoringService, + ResourceUsageCollectorService resourceUsageCollectorService ) { this.settings = settings; this.threadPool = threadPool; @@ -137,6 +139,7 @@ public class NodeService implements Closeable { this.clusterService = clusterService; this.fileCache = fileCache; this.taskCancellationMonitoringService = taskCancellationMonitoringService; + this.resourceUsageCollectorService = resourceUsageCollectorService; clusterService.addStateApplier(ingestService); clusterService.addStateApplier(searchPipelineService); } @@ -217,7 +220,8 @@ public NodeStats stats( boolean weightedRoutingStats, boolean fileCacheStats, boolean taskCancellation, - boolean searchPipelineStats + boolean searchPipelineStats, + boolean resourceUsageStats ) { // for indices stats we want to include previous allocated shards stats as well (it will // only be applied to the sensible ones to use, like refresh/merge/flush/indexing stats) @@ -237,6 +241,7 @@ public NodeStats stats( discoveryStats ? discovery.stats() : null, ingest ? ingestService.stats() : null, adaptiveSelection ? responseCollectorService.getAdaptiveStats(searchTransportService.getPendingSearchRequests()) : null, + resourceUsageStats ? resourceUsageCollectorService.stats() : null, scriptCache ? scriptService.cacheStats() : null, indexingPressure ? this.indexingPressureService.nodeStats() : null, shardIndexingPressure ? this.indexingPressureService.shardStats(indices) : null, diff --git a/server/src/main/java/org/opensearch/node/NodesResourceUsageStats.java b/server/src/main/java/org/opensearch/node/NodesResourceUsageStats.java new file mode 100644 index 0000000000000..3dff9a27f71a8 --- /dev/null +++ b/server/src/main/java/org/opensearch/node/NodesResourceUsageStats.java @@ -0,0 +1,69 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.node; + +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Locale; +import java.util.Map; + +/** + * This class represents resource usage stats such as CPU, Memory and IO resource usage of each node along with the + * timestamp of the stats recorded. + */ +public class NodesResourceUsageStats implements Writeable, ToXContentFragment { + + // Map of node id to resource usage stats of the corresponding node. 
+ private final Map nodeIdToResourceUsageStatsMap; + + public NodesResourceUsageStats(Map nodeIdToResourceUsageStatsMap) { + this.nodeIdToResourceUsageStatsMap = nodeIdToResourceUsageStatsMap; + } + + public NodesResourceUsageStats(StreamInput in) throws IOException { + this.nodeIdToResourceUsageStatsMap = in.readMap(StreamInput::readString, NodeResourceUsageStats::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeMap(this.nodeIdToResourceUsageStatsMap, StreamOutput::writeString, (stream, stats) -> stats.writeTo(stream)); + } + + /** + * Returns map of node id to resource usage stats of the corresponding node. + */ + public Map getNodeIdToResourceUsageStatsMap() { + return nodeIdToResourceUsageStatsMap; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject("resource_usage_stats"); + for (String nodeId : nodeIdToResourceUsageStatsMap.keySet()) { + builder.startObject(nodeId); + NodeResourceUsageStats resourceUsageStats = nodeIdToResourceUsageStatsMap.get(nodeId); + if (resourceUsageStats != null) { + builder.field("timestamp", resourceUsageStats.timestamp); + builder.field("cpu_utilization_percent", String.format(Locale.ROOT, "%.1f", resourceUsageStats.cpuUtilizationPercent)); + builder.field( + "memory_utilization_percent", + String.format(Locale.ROOT, "%.1f", resourceUsageStats.memoryUtilizationPercent) + ); + } + builder.endObject(); + } + builder.endObject(); + return builder; + } +} diff --git a/server/src/main/java/org/opensearch/node/ResourceUsageCollectorService.java b/server/src/main/java/org/opensearch/node/ResourceUsageCollectorService.java new file mode 100644 index 0000000000000..f1c763e09f147 --- /dev/null +++ b/server/src/main/java/org/opensearch/node/ResourceUsageCollectorService.java @@ -0,0 +1,160 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.node; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.cluster.ClusterChangedEvent; +import org.opensearch.cluster.ClusterStateListener; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.ConcurrentCollections; +import org.opensearch.node.resource.tracker.NodeResourceUsageTracker; +import org.opensearch.threadpool.Scheduler; +import org.opensearch.threadpool.ThreadPool; + +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.ConcurrentMap; + +/** + * This collects node level resource usage statistics such as cpu, memory, IO of each node and makes it available for + * coordinator node to aid in throttling, ranking etc + */ +public class ResourceUsageCollectorService extends AbstractLifecycleComponent implements ClusterStateListener { + + /** + * This refresh interval denotes the polling interval of ResourceUsageCollectorService to refresh the resource usage + * stats from local node + */ + private static long REFRESH_INTERVAL_IN_MILLIS = 1000; + + private static final Logger logger = LogManager.getLogger(ResourceUsageCollectorService.class); + private final ConcurrentMap nodeIdToResourceUsageStats = ConcurrentCollections.newConcurrentMap(); + + private ThreadPool threadPool; + private volatile Scheduler.Cancellable scheduledFuture; + + private NodeResourceUsageTracker nodeResourceUsageTracker; + private ClusterService clusterService; + + public ResourceUsageCollectorService( + NodeResourceUsageTracker nodeResourceUsageTracker, + ClusterService clusterService, + ThreadPool threadPool + ) { + this.threadPool = threadPool; + this.nodeResourceUsageTracker = nodeResourceUsageTracker; + this.clusterService = clusterService; + clusterService.addListener(this); + } + + @Override + public void clusterChanged(ClusterChangedEvent event) { + if (event.nodesRemoved()) { + for (DiscoveryNode removedNode : event.nodesDelta().removedNodes()) { + removeNodeResourceUsageStats(removedNode.getId()); + } + } + } + + void removeNodeResourceUsageStats(String nodeId) { + nodeIdToResourceUsageStats.remove(nodeId); + } + + /** + * Collect node resource usage stats along with the timestamp + */ + public void collectNodeResourceUsageStats( + String nodeId, + long timestamp, + double memoryUtilizationPercent, + double cpuUtilizationPercent + ) { + nodeIdToResourceUsageStats.compute(nodeId, (id, resourceUsageStats) -> { + if (resourceUsageStats == null) { + return new NodeResourceUsageStats(nodeId, timestamp, memoryUtilizationPercent, cpuUtilizationPercent); + } else { + resourceUsageStats.cpuUtilizationPercent = cpuUtilizationPercent; + resourceUsageStats.memoryUtilizationPercent = memoryUtilizationPercent; + resourceUsageStats.timestamp = timestamp; + return resourceUsageStats; + } + }); + } + + /** + * Get all node resource usage statistics which will be used for node stats + */ + public Map getAllNodeStatistics() { + Map nodeStats = new HashMap<>(nodeIdToResourceUsageStats.size()); + nodeIdToResourceUsageStats.forEach((nodeId, resourceUsageStats) -> { + nodeStats.put(nodeId, new NodeResourceUsageStats(resourceUsageStats)); + }); + return nodeStats; + } + + /** + * Optionally return a {@code NodeResourceUsageStats} for the given nodeid, if + * resource usage 
stats information exists for the given node. Returns an empty + * {@code Optional} if the node was not found. + */ + public Optional getNodeStatistics(final String nodeId) { + return Optional.ofNullable(nodeIdToResourceUsageStats.get(nodeId)) + .map(resourceUsageStats -> new NodeResourceUsageStats(resourceUsageStats)); + } + + /** + * Returns collected resource usage statistics of all nodes + */ + public NodesResourceUsageStats stats() { + return new NodesResourceUsageStats(getAllNodeStatistics()); + } + + /** + * Fetch local node resource usage statistics and add it to store along with the current timestamp + */ + private void collectLocalNodeResourceUsageStats() { + if (nodeResourceUsageTracker.isReady() && clusterService.state() != null) { + collectNodeResourceUsageStats( + clusterService.state().nodes().getLocalNodeId(), + System.currentTimeMillis(), + nodeResourceUsageTracker.getMemoryUtilizationPercent(), + nodeResourceUsageTracker.getCpuUtilizationPercent() + ); + } + } + + @Override + protected void doStart() { + /** + * Fetch local node resource usage statistics every second + */ + scheduledFuture = threadPool.scheduleWithFixedDelay(() -> { + try { + collectLocalNodeResourceUsageStats(); + } catch (Exception e) { + logger.warn("failure in ResourceUsageCollectorService", e); + } + }, new TimeValue(REFRESH_INTERVAL_IN_MILLIS), ThreadPool.Names.GENERIC); + } + + @Override + protected void doStop() { + if (scheduledFuture != null) { + scheduledFuture.cancel(); + } + } + + @Override + protected void doClose() {} +} diff --git a/server/src/main/java/org/opensearch/node/resource/tracker/AbstractAverageUsageTracker.java b/server/src/main/java/org/opensearch/node/resource/tracker/AbstractAverageUsageTracker.java new file mode 100644 index 0000000000000..f83a1b7f9fc05 --- /dev/null +++ b/server/src/main/java/org/opensearch/node/resource/tracker/AbstractAverageUsageTracker.java @@ -0,0 +1,100 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.node.resource.tracker; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.MovingAverage; +import org.opensearch.threadpool.Scheduler; +import org.opensearch.threadpool.ThreadPool; + +import java.util.concurrent.atomic.AtomicReference; + +/** + * Base class for sliding window resource usage trackers + */ +public abstract class AbstractAverageUsageTracker extends AbstractLifecycleComponent { + private static final Logger LOGGER = LogManager.getLogger(AbstractAverageUsageTracker.class); + + private final ThreadPool threadPool; + private final TimeValue pollingInterval; + private TimeValue windowDuration; + private final AtomicReference observations = new AtomicReference<>(); + + private volatile Scheduler.Cancellable scheduledFuture; + + public AbstractAverageUsageTracker(ThreadPool threadPool, TimeValue pollingInterval, TimeValue windowDuration) { + this.threadPool = threadPool; + this.pollingInterval = pollingInterval; + this.windowDuration = windowDuration; + this.setWindowSize(windowDuration); + } + + public abstract long getUsage(); + + /** + * Returns the moving average of the datapoints + */ + public double getAverage() { + return observations.get().getAverage(); + } + + /** + * Checks if we have datapoints more than or equal to the window size + */ + public boolean isReady() { + return observations.get().isReady(); + } + + /** + * Creates a new instance of MovingAverage with a new window size based on WindowDuration + */ + public void setWindowSize(TimeValue windowDuration) { + this.windowDuration = windowDuration; + int windowSize = (int) (windowDuration.nanos() / pollingInterval.nanos()); + LOGGER.debug("updated window size: {}", windowSize); + observations.set(new MovingAverage(windowSize)); + } + + public TimeValue getPollingInterval() { + return pollingInterval; + } + + public TimeValue getWindowDuration() { + return windowDuration; + } + + public long getWindowSize() { + return observations.get().getCount(); + } + + public void recordUsage(long usage) { + observations.get().record(usage); + } + + @Override + protected void doStart() { + scheduledFuture = threadPool.scheduleWithFixedDelay(() -> { + long usage = getUsage(); + recordUsage(usage); + }, pollingInterval, ThreadPool.Names.GENERIC); + } + + @Override + protected void doStop() { + if (scheduledFuture != null) { + scheduledFuture.cancel(); + } + } + + @Override + protected void doClose() {} +} diff --git a/server/src/main/java/org/opensearch/node/resource/tracker/AverageCpuUsageTracker.java b/server/src/main/java/org/opensearch/node/resource/tracker/AverageCpuUsageTracker.java new file mode 100644 index 0000000000000..160d385762eb0 --- /dev/null +++ b/server/src/main/java/org/opensearch/node/resource/tracker/AverageCpuUsageTracker.java @@ -0,0 +1,38 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.node.resource.tracker; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.monitor.process.ProcessProbe; +import org.opensearch.threadpool.ThreadPool; + +/** + * AverageCpuUsageTracker tracks the average CPU usage by polling the CPU usage every (pollingInterval) + * and keeping track of the rolling average over a defined time window (windowDuration). + */ +public class AverageCpuUsageTracker extends AbstractAverageUsageTracker { + private static final Logger LOGGER = LogManager.getLogger(AverageCpuUsageTracker.class); + + public AverageCpuUsageTracker(ThreadPool threadPool, TimeValue pollingInterval, TimeValue windowDuration) { + super(threadPool, pollingInterval, windowDuration); + } + + /** + * Returns the process CPU usage in percent + */ + @Override + public long getUsage() { + long usage = ProcessProbe.getInstance().getProcessCpuPercent(); + LOGGER.debug("Recording cpu usage: {}%", usage); + return usage; + } + +} diff --git a/server/src/main/java/org/opensearch/node/resource/tracker/AverageMemoryUsageTracker.java b/server/src/main/java/org/opensearch/node/resource/tracker/AverageMemoryUsageTracker.java new file mode 100644 index 0000000000000..c1d1c83656859 --- /dev/null +++ b/server/src/main/java/org/opensearch/node/resource/tracker/AverageMemoryUsageTracker.java @@ -0,0 +1,42 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.node.resource.tracker; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.threadpool.ThreadPool; + +import java.lang.management.ManagementFactory; +import java.lang.management.MemoryMXBean; + +/** + * AverageMemoryUsageTracker tracks the average JVM usage by polling the JVM usage every (pollingInterval) + * and keeping track of the rolling average over a defined time window (windowDuration). + */ +public class AverageMemoryUsageTracker extends AbstractAverageUsageTracker { + + private static final Logger LOGGER = LogManager.getLogger(AverageMemoryUsageTracker.class); + + private static final MemoryMXBean MEMORY_MX_BEAN = ManagementFactory.getMemoryMXBean(); + + public AverageMemoryUsageTracker(ThreadPool threadPool, TimeValue pollingInterval, TimeValue windowDuration) { + super(threadPool, pollingInterval, windowDuration); + } + + /** + * Get current memory usage percentage calculated against max heap memory + */ + @Override + public long getUsage() { + long usage = MEMORY_MX_BEAN.getHeapMemoryUsage().getUsed() * 100 / MEMORY_MX_BEAN.getHeapMemoryUsage().getMax(); + LOGGER.debug("Recording memory usage: {}%", usage); + return usage; + } +} diff --git a/server/src/main/java/org/opensearch/node/resource/tracker/NodeResourceUsageTracker.java b/server/src/main/java/org/opensearch/node/resource/tracker/NodeResourceUsageTracker.java new file mode 100644 index 0000000000000..cf5f38c1b004c --- /dev/null +++ b/server/src/main/java/org/opensearch/node/resource/tracker/NodeResourceUsageTracker.java @@ -0,0 +1,118 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.node.resource.tracker; + +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.threadpool.ThreadPool; + +/** + * This tracks the usage of node resources such as CPU, IO and memory + */ +public class NodeResourceUsageTracker extends AbstractLifecycleComponent { + private ThreadPool threadPool; + private final ClusterSettings clusterSettings; + private AverageCpuUsageTracker cpuUsageTracker; + private AverageMemoryUsageTracker memoryUsageTracker; + + private ResourceTrackerSettings resourceTrackerSettings; + + public NodeResourceUsageTracker(ThreadPool threadPool, Settings settings, ClusterSettings clusterSettings) { + this.threadPool = threadPool; + this.clusterSettings = clusterSettings; + this.resourceTrackerSettings = new ResourceTrackerSettings(settings); + initialize(); + } + + /** + * Return CPU utilization average if we have enough datapoints, otherwise return 0 + */ + public double getCpuUtilizationPercent() { + if (cpuUsageTracker.isReady()) { + return cpuUsageTracker.getAverage(); + } + return 0.0; + } + + /** + * Return memory utilization average if we have enough datapoints, otherwise return 0 + */ + public double getMemoryUtilizationPercent() { + if (memoryUsageTracker.isReady()) { + return memoryUsageTracker.getAverage(); + } + return 0.0; + } + + /** + * Checks if all of the resource usage trackers are ready + */ + public boolean isReady() { + return memoryUsageTracker.isReady() && cpuUsageTracker.isReady(); + } + + void initialize() { + cpuUsageTracker = new AverageCpuUsageTracker( + threadPool, + resourceTrackerSettings.getCpuPollingInterval(), + resourceTrackerSettings.getCpuWindowDuration() + ); + clusterSettings.addSettingsUpdateConsumer( + ResourceTrackerSettings.GLOBAL_CPU_USAGE_AC_WINDOW_DURATION_SETTING, + this::setCpuWindowDuration + ); + + memoryUsageTracker = new AverageMemoryUsageTracker( + threadPool, + resourceTrackerSettings.getMemoryPollingInterval(), + resourceTrackerSettings.getMemoryWindowDuration() + ); + clusterSettings.addSettingsUpdateConsumer( + ResourceTrackerSettings.GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING, + this::setMemoryWindowDuration + ); + } + + private void setMemoryWindowDuration(TimeValue windowDuration) { + memoryUsageTracker.setWindowSize(windowDuration); + resourceTrackerSettings.setMemoryWindowDuration(windowDuration); + } + + private void setCpuWindowDuration(TimeValue windowDuration) { + cpuUsageTracker.setWindowSize(windowDuration); + resourceTrackerSettings.setCpuWindowDuration(windowDuration); + } + + /** + * Visible for testing + */ + ResourceTrackerSettings getResourceTrackerSettings() { + return resourceTrackerSettings; + } + + @Override + protected void doStart() { + cpuUsageTracker.doStart(); + memoryUsageTracker.doStart(); + } + + @Override + protected void doStop() { + cpuUsageTracker.doStop(); + memoryUsageTracker.doStop(); + } + + @Override + protected void doClose() { + cpuUsageTracker.doClose(); + memoryUsageTracker.doClose(); + } +} diff --git a/server/src/main/java/org/opensearch/node/resource/tracker/ResourceTrackerSettings.java b/server/src/main/java/org/opensearch/node/resource/tracker/ResourceTrackerSettings.java new file mode 100644 index 0000000000000..f81b008ba7e8b --- /dev/null +++ b/server/src/main/java/org/opensearch/node/resource/tracker/ResourceTrackerSettings.java @@ -0,0 +1,90 @@ +/* + 
* SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.node.resource.tracker;
+
+import org.opensearch.common.settings.Setting;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.unit.TimeValue;
+
+/**
+ * Settings related to resource usage trackers such as polling interval, window duration etc
+ */
+public class ResourceTrackerSettings {
+
+    private static class Defaults {
+        /**
+         * This is the default polling interval of usage trackers to get the resource utilization data
+         */
+        private static final long POLLING_INTERVAL_IN_MILLIS = 500;
+        /**
+         * This is the default window duration on which the average resource utilization values will be calculated
+         */
+        private static final long WINDOW_DURATION_IN_SECONDS = 30;
+    }
+
+    public static final Setting<TimeValue> GLOBAL_CPU_USAGE_AC_POLLING_INTERVAL_SETTING = Setting.positiveTimeSetting(
+        "node.resource.tracker.global_cpu_usage.polling_interval",
+        TimeValue.timeValueMillis(Defaults.POLLING_INTERVAL_IN_MILLIS),
+        Setting.Property.NodeScope
+    );
+    public static final Setting<TimeValue> GLOBAL_CPU_USAGE_AC_WINDOW_DURATION_SETTING = Setting.positiveTimeSetting(
+        "node.resource.tracker.global_cpu_usage.window_duration",
+        TimeValue.timeValueSeconds(Defaults.WINDOW_DURATION_IN_SECONDS),
+        Setting.Property.Dynamic,
+        Setting.Property.NodeScope
+    );
+
+    public static final Setting<TimeValue> GLOBAL_JVM_USAGE_AC_POLLING_INTERVAL_SETTING = Setting.positiveTimeSetting(
+        "node.resource.tracker.global_jvmmp.polling_interval",
+        TimeValue.timeValueMillis(Defaults.POLLING_INTERVAL_IN_MILLIS),
+        Setting.Property.NodeScope
+    );
+
+    public static final Setting<TimeValue> GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING = Setting.positiveTimeSetting(
+        "node.resource.tracker.global_jvmmp.window_duration",
+        TimeValue.timeValueSeconds(Defaults.WINDOW_DURATION_IN_SECONDS),
+        Setting.Property.Dynamic,
+        Setting.Property.NodeScope
+    );
+    private volatile TimeValue cpuWindowDuration;
+    private volatile TimeValue cpuPollingInterval;
+    private volatile TimeValue memoryWindowDuration;
+    private volatile TimeValue memoryPollingInterval;
+
+    public ResourceTrackerSettings(Settings settings) {
+        this.cpuPollingInterval = GLOBAL_CPU_USAGE_AC_POLLING_INTERVAL_SETTING.get(settings);
+        this.cpuWindowDuration = GLOBAL_CPU_USAGE_AC_WINDOW_DURATION_SETTING.get(settings);
+        this.memoryPollingInterval = GLOBAL_JVM_USAGE_AC_POLLING_INTERVAL_SETTING.get(settings);
+        this.memoryWindowDuration = GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING.get(settings);
+    }
+
+    public TimeValue getCpuWindowDuration() {
+        return this.cpuWindowDuration;
+    }
+
+    public TimeValue getCpuPollingInterval() {
+        return cpuPollingInterval;
+    }
+
+    public TimeValue getMemoryPollingInterval() {
+        return memoryPollingInterval;
+    }
+
+    public TimeValue getMemoryWindowDuration() {
+        return memoryWindowDuration;
+    }
+
+    public void setCpuWindowDuration(TimeValue cpuWindowDuration) {
+        this.cpuWindowDuration = cpuWindowDuration;
+    }
+
+    public void setMemoryWindowDuration(TimeValue memoryWindowDuration) {
+        this.memoryWindowDuration = memoryWindowDuration;
+    }
+}
diff --git a/server/src/main/java/org/opensearch/node/resource/tracker/package-info.java b/server/src/main/java/org/opensearch/node/resource/tracker/package-info.java
new file mode 100644
index 0000000000000..aace2a019973e
--- /dev/null
+++ 
b/server/src/main/java/org/opensearch/node/resource/tracker/package-info.java
@@ -0,0 +1,12 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/**
+ * Node level resource usage stats tracker package
+ */
+package org.opensearch.node.resource.tracker;
diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java
index e0b35c69cc3c0..7a1b6f113d0e8 100644
--- a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java
@@ -59,6 +59,8 @@
 import org.opensearch.monitor.os.OsStats;
 import org.opensearch.monitor.process.ProcessStats;
 import org.opensearch.node.AdaptiveSelectionStats;
+import org.opensearch.node.NodeResourceUsageStats;
+import org.opensearch.node.NodesResourceUsageStats;
 import org.opensearch.node.ResponseCollectorService;
 import org.opensearch.script.ScriptCacheStats;
 import org.opensearch.script.ScriptStats;
@@ -393,6 +395,24 @@ public void testSerialization() throws IOException {
                     assertEquals(aStats.responseTime, bStats.responseTime, 0.01);
                 });
             }
+            NodesResourceUsageStats resourceUsageStats = nodeStats.getResourceUsageStats();
+            NodesResourceUsageStats deserializedResourceUsageStats = deserializedNodeStats.getResourceUsageStats();
+            if (resourceUsageStats == null) {
+                assertNull(deserializedResourceUsageStats);
+            } else {
+                resourceUsageStats.getNodeIdToResourceUsageStatsMap().forEach((k, v) -> {
+                    NodeResourceUsageStats aResourceUsageStats = resourceUsageStats.getNodeIdToResourceUsageStatsMap().get(k);
+                    NodeResourceUsageStats bResourceUsageStats = deserializedResourceUsageStats.getNodeIdToResourceUsageStatsMap()
+                        .get(k);
+                    assertEquals(
+                        aResourceUsageStats.getMemoryUtilizationPercent(),
+                        bResourceUsageStats.getMemoryUtilizationPercent(),
+                        0.0
+                    );
+                    assertEquals(aResourceUsageStats.getCpuUtilizationPercent(), bResourceUsageStats.getCpuUtilizationPercent(), 0.0);
+                    assertEquals(aResourceUsageStats.getTimestamp(), bResourceUsageStats.getTimestamp());
+                });
+            }
             ScriptCacheStats scriptCacheStats = nodeStats.getScriptCacheStats();
             ScriptCacheStats deserializedScriptCacheStats = deserializedNodeStats.getScriptCacheStats();
             if (scriptCacheStats == null) {
@@ -756,6 +776,30 @@ public static NodeStats createNodeStats(boolean remoteStoreStats) {
             }
             adaptiveSelectionStats = new AdaptiveSelectionStats(nodeConnections, nodeStats);
         }
+        NodesResourceUsageStats nodesResourceUsageStats = null;
+        if (frequently()) {
+            int numNodes = randomIntBetween(0, 10);
+            Map<String, Long> nodeConnections = new HashMap<>();
+            Map<String, NodeResourceUsageStats> resourceUsageStatsMap = new HashMap<>();
+            for (int i = 0; i < numNodes; i++) {
+                String nodeId = randomAlphaOfLengthBetween(3, 10);
+                // add outgoing connection info
+                if (frequently()) {
+                    nodeConnections.put(nodeId, randomLongBetween(0, 100));
+                }
+                // add node calculations
+                if (frequently()) {
+                    NodeResourceUsageStats stats = new NodeResourceUsageStats(
+                        nodeId,
+                        System.currentTimeMillis(),
+                        randomDoubleBetween(1.0, 100.0, true),
+                        randomDoubleBetween(1.0, 100.0, true)
+                    );
+                    resourceUsageStatsMap.put(nodeId, stats);
+                }
+            }
+            nodesResourceUsageStats = new NodesResourceUsageStats(resourceUsageStatsMap);
+        }
         ClusterManagerThrottlingStats clusterManagerThrottlingStats =
null; if (frequently()) { clusterManagerThrottlingStats = new ClusterManagerThrottlingStats(); @@ -787,6 +831,7 @@ public static NodeStats createNodeStats(boolean remoteStoreStats) { discoveryStats, ingestStats, adaptiveSelectionStats, + nodesResourceUsageStats, scriptCacheStats, null, null, diff --git a/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java b/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java index 8ba965b3df1ab..64949cf861f70 100644 --- a/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java +++ b/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java @@ -190,6 +190,7 @@ public void testFillDiskUsage() { null, null, null, + null, null ), new NodeStats( @@ -216,6 +217,7 @@ public void testFillDiskUsage() { null, null, null, + null, null ), new NodeStats( @@ -242,6 +244,7 @@ public void testFillDiskUsage() { null, null, null, + null, null ) ); @@ -299,6 +302,7 @@ public void testFillDiskUsageSomeInvalidValues() { null, null, null, + null, null ), new NodeStats( @@ -325,6 +329,7 @@ public void testFillDiskUsageSomeInvalidValues() { null, null, null, + null, null ), new NodeStats( @@ -351,6 +356,7 @@ public void testFillDiskUsageSomeInvalidValues() { null, null, null, + null, null ) ); diff --git a/server/src/test/java/org/opensearch/node/ResourceUsageCollectorServiceTests.java b/server/src/test/java/org/opensearch/node/ResourceUsageCollectorServiceTests.java new file mode 100644 index 0000000000000..b2fa884afab69 --- /dev/null +++ b/server/src/test/java/org/opensearch/node/ResourceUsageCollectorServiceTests.java @@ -0,0 +1,190 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
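[The test file that follows exercises ResourceUsageCollectorService end to end. For orientation, a condensed sketch of the collector API as those tests use it; the node id and values are illustrative, and note that testResourceUsageStats below passes memory before CPU (97, then 99) and reads them back in that order:

    collector.collectNodeResourceUsageStats("node1", System.currentTimeMillis(), 97, 99);
    Optional<NodeResourceUsageStats> stats = collector.getNodeStatistics("node1");
    stats.ifPresent(s -> {
        double memory = s.memoryUtilizationPercent; // 97.0
        double cpu = s.cpuUtilizationPercent;       // 99.0
    });
]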
+ */
+
+package org.opensearch.node;
+
+import org.opensearch.cluster.ClusterChangedEvent;
+import org.opensearch.cluster.ClusterName;
+import org.opensearch.cluster.ClusterState;
+import org.opensearch.cluster.node.DiscoveryNode;
+import org.opensearch.cluster.node.DiscoveryNodes;
+import org.opensearch.cluster.service.ClusterService;
+import org.opensearch.common.settings.ClusterSettings;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.unit.TimeValue;
+import org.opensearch.core.common.transport.TransportAddress;
+import org.opensearch.node.resource.tracker.NodeResourceUsageTracker;
+import org.opensearch.node.resource.tracker.ResourceTrackerSettings;
+import org.opensearch.test.OpenSearchSingleNodeTestCase;
+import org.opensearch.threadpool.TestThreadPool;
+import org.opensearch.threadpool.ThreadPool;
+import org.junit.After;
+import org.junit.Before;
+
+import java.util.Map;
+import java.util.Optional;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+import static org.opensearch.test.ClusterServiceUtils.createClusterService;
+import static org.hamcrest.Matchers.greaterThan;
+
+/**
+ * Tests for ResourceUsageCollectorService where we test collect method, get method and whether schedulers
+ * are working as expected
+ */
+public class ResourceUsageCollectorServiceTests extends OpenSearchSingleNodeTestCase {
+
+    private ClusterService clusterService;
+    private ResourceUsageCollectorService collector;
+    private ThreadPool threadpool;
+    NodeResourceUsageTracker tracker;
+
+    @Before
+    public void setUp() throws Exception {
+        super.setUp();
+
+        threadpool = new TestThreadPool("resource_usage_collector_tests");
+
+        clusterService = createClusterService(threadpool);
+
+        Settings settings = Settings.builder()
+            .put(ResourceTrackerSettings.GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING.getKey(), new TimeValue(500, TimeUnit.MILLISECONDS))
+            .build();
+        tracker = new NodeResourceUsageTracker(
+            threadpool,
+            settings,
+            new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)
+        );
+        collector = new ResourceUsageCollectorService(tracker, clusterService, threadpool);
+        tracker.start();
+        collector.start();
+    }
+
+    @After
+    public void tearDown() throws Exception {
+        super.tearDown();
+        threadpool.shutdownNow();
+        clusterService.close();
+        collector.stop();
+        tracker.stop();
+        collector.close();
+        tracker.close();
+    }
+
+    public void testResourceUsageStats() {
+        collector.collectNodeResourceUsageStats("node1", System.currentTimeMillis(), 97, 99);
+        Map<String, NodeResourceUsageStats> nodeStats = collector.getAllNodeStatistics();
+        assertTrue(nodeStats.containsKey("node1"));
+        assertEquals(99.0, nodeStats.get("node1").cpuUtilizationPercent, 0.0);
+        assertEquals(97.0, nodeStats.get("node1").memoryUtilizationPercent, 0.0);
+
+        Optional<NodeResourceUsageStats> nodeResourceUsageStatsOptional = collector.getNodeStatistics("node1");
+
+        assertNotNull(nodeResourceUsageStatsOptional.get());
+        assertEquals(99.0, nodeResourceUsageStatsOptional.get().cpuUtilizationPercent, 0.0);
+        assertEquals(97.0, nodeResourceUsageStatsOptional.get().memoryUtilizationPercent, 0.0);
+
+        nodeResourceUsageStatsOptional = collector.getNodeStatistics("node2");
+        assertTrue(nodeResourceUsageStatsOptional.isEmpty());
+    }
+
+    public void testScheduler() throws Exception {
+        /**
+         * Wait for cluster state to be ready so that localNode().getId() is ready and we add the values to the map
+         */
+        assertBusy(() -> assertTrue(collector.getNodeStatistics(clusterService.localNode().getId()).isPresent()), 1, TimeUnit.MINUTES);
+        assertTrue(collector.getNodeStatistics(clusterService.localNode().getId()).isPresent());
+        /**
+         * Wait for memory utilization to be reported greater than 0
+         */
+        assertBusy(
+            () -> assertThat(
+                collector.getNodeStatistics(clusterService.localNode().getId()).get().getMemoryUtilizationPercent(),
+                greaterThan(0.0)
+            ),
+            5,
+            TimeUnit.SECONDS
+        );
+        assertTrue(collector.getNodeStatistics("Invalid").isEmpty());
+    }
+
+    /*
+     * Test that concurrently adding values and removing nodes does not cause exceptions
+     */
+    public void testConcurrentAddingAndRemovingNodes() throws Exception {
+        String[] nodes = new String[] { "a", "b", "c", "d" };
+
+        final CountDownLatch latch = new CountDownLatch(5);
+
+        Runnable f = () -> {
+            latch.countDown();
+            try {
+                latch.await();
+            } catch (InterruptedException e) {
+                fail("should not be interrupted");
+            }
+            for (int i = 0; i < randomIntBetween(100, 200); i++) {
+                if (randomBoolean()) {
+                    collector.removeNodeResourceUsageStats(randomFrom(nodes));
+                }
+                collector.collectNodeResourceUsageStats(
+                    randomFrom(nodes),
+                    System.currentTimeMillis(),
+                    randomIntBetween(1, 100),
+                    randomIntBetween(1, 100)
+                );
+            }
+        };
+
+        Thread t1 = new Thread(f);
+        Thread t2 = new Thread(f);
+        Thread t3 = new Thread(f);
+        Thread t4 = new Thread(f);
+
+        t1.start();
+        t2.start();
+        t3.start();
+        t4.start();
+        latch.countDown();
+        t1.join();
+        t2.join();
+        t3.join();
+        t4.join();
+
+        final Map<String, NodeResourceUsageStats> nodeStats = collector.getAllNodeStatistics();
+        for (String nodeId : nodes) {
+            if (nodeStats.containsKey(nodeId)) {
+                assertThat(nodeStats.get(nodeId).memoryUtilizationPercent, greaterThan(0.0));
+                assertThat(nodeStats.get(nodeId).cpuUtilizationPercent, greaterThan(0.0));
+            }
+        }
+    }
+
+    public void testNodeRemoval() {
+        collector.collectNodeResourceUsageStats("node1", System.currentTimeMillis(), randomIntBetween(1, 100), randomIntBetween(1, 100));
+        collector.collectNodeResourceUsageStats("node2", System.currentTimeMillis(), randomIntBetween(1, 100), randomIntBetween(1, 100));
+
+        ClusterState previousState = ClusterState.builder(new ClusterName("cluster"))
+            .nodes(
+                DiscoveryNodes.builder()
+                    .add(DiscoveryNode.createLocal(Settings.EMPTY, new TransportAddress(TransportAddress.META_ADDRESS, 9200), "node1"))
+                    .add(DiscoveryNode.createLocal(Settings.EMPTY, new TransportAddress(TransportAddress.META_ADDRESS, 9201), "node2"))
+            )
+            .build();
+        ClusterState newState = ClusterState.builder(previousState)
+            .nodes(DiscoveryNodes.builder(previousState.nodes()).remove("node2"))
+            .build();
+        ClusterChangedEvent event = new ClusterChangedEvent("test", newState, previousState);
+
+        collector.clusterChanged(event);
+        final Map<String, NodeResourceUsageStats> nodeStats = collector.getAllNodeStatistics();
+        assertTrue(nodeStats.containsKey("node1"));
+        assertFalse(nodeStats.containsKey("node2"));
+    }
+}
diff --git a/server/src/test/java/org/opensearch/node/resource/tracker/AverageUsageTrackerTests.java b/server/src/test/java/org/opensearch/node/resource/tracker/AverageUsageTrackerTests.java
new file mode 100644
index 0000000000000..374c993a264d4
--- /dev/null
+++ b/server/src/test/java/org/opensearch/node/resource/tracker/AverageUsageTrackerTests.java
@@ -0,0 +1,99 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
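[One behavior the next test file (AverageUsageTrackerTests) pins down is that setWindowSize(...) swaps in a fresh MovingAverage: recorded history is discarded, getWindowSize() drops to 0, and isReady() stays false until the new window fills. This is also what the dynamic *window_duration settings trigger at runtime through the consumers registered in NodeResourceUsageTracker. A short sketch with illustrative values:

    tracker.recordUsage(1);
    tracker.recordUsage(2);                                  // 2-sample window is now full
    tracker.setWindowSize(TimeValue.timeValueMillis(2000));  // 2000ms / 500ms poll = 4 samples
    assert tracker.isReady() == false;                       // history was reset
    assert tracker.getAverage() == 0.0;                      // until 4 new datapoints arrive
]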
+ */ + +package org.opensearch.node.resource.tracker; + +import org.opensearch.common.unit.TimeValue; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; + +import java.util.concurrent.TimeUnit; + +/** + * Tests to validate AverageMemoryUsageTracker and AverageCpuUsageTracker implementation + */ +public class AverageUsageTrackerTests extends OpenSearchTestCase { + ThreadPool threadPool; + AverageMemoryUsageTracker averageMemoryUsageTracker; + AverageCpuUsageTracker averageCpuUsageTracker; + + @Before + public void setup() { + threadPool = new TestThreadPool(getClass().getName()); + averageMemoryUsageTracker = new AverageMemoryUsageTracker( + threadPool, + new TimeValue(500, TimeUnit.MILLISECONDS), + new TimeValue(1000, TimeUnit.MILLISECONDS) + ); + averageCpuUsageTracker = new AverageCpuUsageTracker( + threadPool, + new TimeValue(500, TimeUnit.MILLISECONDS), + new TimeValue(1000, TimeUnit.MILLISECONDS) + ); + } + + @After + public void cleanup() { + ThreadPool.terminate(threadPool, 5, TimeUnit.SECONDS); + } + + public void testBasicUsage() { + + assertAverageUsageStats(averageMemoryUsageTracker); + assertAverageUsageStats(averageCpuUsageTracker); + } + + public void testUpdateWindowSize() { + assertUpdateWindowSize(averageMemoryUsageTracker); + assertUpdateWindowSize(averageCpuUsageTracker); + } + + private void assertAverageUsageStats(AbstractAverageUsageTracker usageTracker) { + usageTracker.recordUsage(1); + assertFalse(usageTracker.isReady()); + usageTracker.recordUsage(2); + assertTrue(usageTracker.isReady()); + assertEquals(2, usageTracker.getWindowSize()); + assertEquals(1.5, usageTracker.getAverage(), 0.0); + usageTracker.recordUsage(5); + // ( 2 + 5 ) / 2 = 3.5 + assertEquals(3.5, usageTracker.getAverage(), 0.0); + } + + private void assertUpdateWindowSize(AbstractAverageUsageTracker usageTracker) { + usageTracker.recordUsage(1); + usageTracker.recordUsage(2); + + assertEquals(2, usageTracker.getWindowSize()); + assertEquals(1.5, usageTracker.getAverage(), 0.0); + usageTracker.recordUsage(5); + // ( 2 + 5 ) / 2 = 3.5 + assertEquals(3.5, usageTracker.getAverage(), 0.0); + + usageTracker.setWindowSize(new TimeValue(2000, TimeUnit.MILLISECONDS)); + assertEquals(0, usageTracker.getWindowSize()); + assertEquals(0.0, usageTracker.getAverage(), 0.0); + // verify 2000/500 = 4 is the window size and average is calculated on window size of 4 + usageTracker.recordUsage(1); + usageTracker.recordUsage(2); + usageTracker.recordUsage(1); + assertFalse(usageTracker.isReady()); + usageTracker.recordUsage(2); + assertTrue(usageTracker.isReady()); + assertEquals(4, usageTracker.getWindowSize()); + // (1 + 2 + 1 + 2 ) / 4 = 1.5 + assertEquals(1.5, usageTracker.getAverage(), 0.0); + usageTracker.recordUsage(2); + assertTrue(usageTracker.isReady()); + // ( 2 + 1 + 2 + 2 ) / 4 = 1.75 + assertEquals(1.75, usageTracker.getAverage(), 0.0); + } +} diff --git a/server/src/test/java/org/opensearch/node/resource/tracker/NodeResourceUsageTrackerTests.java b/server/src/test/java/org/opensearch/node/resource/tracker/NodeResourceUsageTrackerTests.java new file mode 100644 index 0000000000000..1ce68b9f29062 --- /dev/null +++ b/server/src/test/java/org/opensearch/node/resource/tracker/NodeResourceUsageTrackerTests.java @@ -0,0 +1,96 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under 
the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.node.resource.tracker; + +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.test.OpenSearchSingleNodeTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; + +import java.util.concurrent.TimeUnit; + +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.hamcrest.Matchers.greaterThan; + +/** + * Tests to assert resource usage trackers retrieving resource utilization averages + */ +public class NodeResourceUsageTrackerTests extends OpenSearchSingleNodeTestCase { + ThreadPool threadPool; + + @Before + public void setup() { + threadPool = new TestThreadPool(getClass().getName()); + } + + @After + public void cleanup() { + ThreadPool.terminate(threadPool, 5, TimeUnit.SECONDS); + assertAcked( + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().putNull("*")) + .setTransientSettings(Settings.builder().putNull("*")) + ); + } + + public void testStats() throws Exception { + Settings settings = Settings.builder() + .put(ResourceTrackerSettings.GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING.getKey(), new TimeValue(500, TimeUnit.MILLISECONDS)) + .build(); + NodeResourceUsageTracker tracker = new NodeResourceUsageTracker( + threadPool, + settings, + new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ); + tracker.start(); + /** + * Asserting memory utilization to be greater than 0 + * cpu percent used is mostly 0, so skipping assertion for that + */ + assertBusy(() -> assertThat(tracker.getMemoryUtilizationPercent(), greaterThan(0.0)), 5, TimeUnit.SECONDS); + tracker.stop(); + tracker.close(); + } + + public void testUpdateSettings() { + NodeResourceUsageTracker tracker = new NodeResourceUsageTracker( + threadPool, + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ); + + assertEquals(tracker.getResourceTrackerSettings().getCpuWindowDuration().getSeconds(), 30); + assertEquals(tracker.getResourceTrackerSettings().getMemoryWindowDuration().getSeconds(), 30); + + Settings settings = Settings.builder() + .put(ResourceTrackerSettings.GLOBAL_CPU_USAGE_AC_WINDOW_DURATION_SETTING.getKey(), "10s") + .build(); + ClusterUpdateSettingsResponse response = client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings).get(); + assertEquals( + "10s", + response.getPersistentSettings().get(ResourceTrackerSettings.GLOBAL_CPU_USAGE_AC_WINDOW_DURATION_SETTING.getKey()) + ); + + Settings jvmsettings = Settings.builder() + .put(ResourceTrackerSettings.GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING.getKey(), "5s") + .build(); + response = client().admin().cluster().prepareUpdateSettings().setPersistentSettings(jvmsettings).get(); + assertEquals( + "5s", + response.getPersistentSettings().get(ResourceTrackerSettings.GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING.getKey()) + ); + } +} diff --git a/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java b/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java index 6354cf18e8b62..a520b6278ea47 100644 --- 
a/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java +++ b/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java @@ -112,6 +112,7 @@ List adjustNodesStats(List nodesStats) { nodeStats.getDiscoveryStats(), nodeStats.getIngestStats(), nodeStats.getAdaptiveSelectionStats(), + nodeStats.getResourceUsageStats(), nodeStats.getScriptCacheStats(), nodeStats.getIndexingPressureStats(), nodeStats.getShardIndexingPressureStats(), diff --git a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java index acdc5b94804c6..3c7423f73685f 100644 --- a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java @@ -2718,6 +2718,7 @@ public void ensureEstimatedStats() { false, false, false, + false, false ); assertThat( From dd6355910bb92af47f78970d50ca646a66b014be Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Oct 2023 10:15:31 -0400 Subject: [PATCH 32/33] Bump org.codehaus.woodstox:stax2-api from 4.2.1 to 4.2.2 in /plugins/repository-hdfs (#10639) * Bump org.codehaus.woodstox:stax2-api in /plugins/repository-hdfs Bumps [org.codehaus.woodstox:stax2-api](https://github.com/FasterXML/stax2-api) from 4.2.1 to 4.2.2. - [Commits](https://github.com/FasterXML/stax2-api/compare/stax2-api-4.2.1...stax2-api-4.2.2) --- updated-dependencies: - dependency-name: org.codehaus.woodstox:stax2-api dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 3 ++- plugins/repository-hdfs/build.gradle | 2 +- plugins/repository-hdfs/licenses/stax2-api-4.2.1.jar.sha1 | 1 - plugins/repository-hdfs/licenses/stax2-api-4.2.2.jar.sha1 | 1 + 4 files changed, 4 insertions(+), 3 deletions(-) delete mode 100644 plugins/repository-hdfs/licenses/stax2-api-4.2.1.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/stax2-api-4.2.2.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 2211f59574718..018ad6a676bcf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -94,6 +94,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `org.apache.zookeeper:zookeeper` from 3.9.0 to 3.9.1 ([#10506](https://github.com/opensearch-project/OpenSearch/pull/10506)) - Bump `de.thetaphi:forbiddenapis` from 3.5.1 to 3.6 ([#10508](https://github.com/opensearch-project/OpenSearch/pull/10508)) - Bump `commons-io:commons-io` from 2.13.0 to 2.14.0 ([#10294](https://github.com/opensearch-project/OpenSearch/pull/10294)) +- Bump `org.codehaus.woodstox:stax2-api` from 4.2.1 to 4.2.2 ([#10639](https://github.com/opensearch-project/OpenSearch/pull/10639)) ### Changed - Mute the query profile IT with concurrent execution ([#9840](https://github.com/opensearch-project/OpenSearch/pull/9840)) @@ -113,4 +114,4 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Security [Unreleased 3.0]: https://github.com/opensearch-project/OpenSearch/compare/2.x...HEAD -[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.12...2.x +[Unreleased 2.x]: 
https://github.com/opensearch-project/OpenSearch/compare/2.12...2.x \ No newline at end of file diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 1ce29afe0e69a..ed1f54888a26f 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -84,7 +84,7 @@ dependencies { api 'net.minidev:json-smart:2.5.0' api "io.netty:netty-all:${versions.netty}" implementation "com.fasterxml.woodstox:woodstox-core:${versions.woodstox}" - implementation 'org.codehaus.woodstox:stax2-api:4.2.1' + implementation 'org.codehaus.woodstox:stax2-api:4.2.2' hdfsFixture project(':test:fixtures:hdfs-fixture') // Set the keytab files in the classpath so that we can access them from test code without the security manager diff --git a/plugins/repository-hdfs/licenses/stax2-api-4.2.1.jar.sha1 b/plugins/repository-hdfs/licenses/stax2-api-4.2.1.jar.sha1 deleted file mode 100644 index 2c12704cdc560..0000000000000 --- a/plugins/repository-hdfs/licenses/stax2-api-4.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a3f7325c52240418c2ba257b103c3c550e140c83 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/stax2-api-4.2.2.jar.sha1 b/plugins/repository-hdfs/licenses/stax2-api-4.2.2.jar.sha1 new file mode 100644 index 0000000000000..b15a7ead0d016 --- /dev/null +++ b/plugins/repository-hdfs/licenses/stax2-api-4.2.2.jar.sha1 @@ -0,0 +1 @@ +b0d746cadea928e5264f2ea294ea9a1bf815bbde \ No newline at end of file From e8ba35cff433b3dc0d821d04326b2bf261beab06 Mon Sep 17 00:00:00 2001 From: Stephen Crawford <65832608+scrawfor99@users.noreply.github.com> Date: Mon, 16 Oct 2023 17:15:26 -0400 Subject: [PATCH 33/33] Remove the authenticate token method from TokenManager interface (#10614) Remove the authenticate token method from TokenManager interface Signed-off-by: Stephen Crawford Signed-off-by: Stephen Crawford <65832608+scrawfor99@users.noreply.github.com> --- .../identity/shiro/ShiroTokenManager.java | 16 ---------------- .../identity/shiro/AuthTokenHandlerTests.java | 5 ----- .../identity/noop/NoopTokenManager.java | 5 ----- .../opensearch/identity/tokens/TokenManager.java | 7 ------- 4 files changed, 33 deletions(-) diff --git a/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/ShiroTokenManager.java b/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/ShiroTokenManager.java index ddfb99e626718..a14215aa7655b 100644 --- a/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/ShiroTokenManager.java +++ b/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/ShiroTokenManager.java @@ -10,13 +10,11 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.apache.shiro.SecurityUtils; import org.apache.shiro.authc.AuthenticationToken; import org.apache.shiro.authc.UsernamePasswordToken; import org.opensearch.common.Randomness; import org.opensearch.identity.IdentityService; import org.opensearch.identity.Subject; -import org.opensearch.identity.noop.NoopSubject; import org.opensearch.identity.tokens.AuthToken; import org.opensearch.identity.tokens.BasicAuthToken; import org.opensearch.identity.tokens.OnBehalfOfClaims; @@ -88,20 +86,6 @@ public AuthToken issueServiceAccountToken(String audience) { return token; } - @Override - public Subject authenticateToken(AuthToken authToken) { - return new NoopSubject(); - } - - public boolean validateToken(AuthToken token) { - if (token instanceof BasicAuthToken) { - final BasicAuthToken basicAuthToken = 
(BasicAuthToken) token; - return basicAuthToken.getUser().equals(SecurityUtils.getSubject().toString()) - && basicAuthToken.getPassword().equals(shiroTokenPasswordMap.get(basicAuthToken)); - } - return false; - } - public String getTokenInfo(AuthToken token) { if (token instanceof BasicAuthToken) { final BasicAuthToken basicAuthToken = (BasicAuthToken) token; diff --git a/plugins/identity-shiro/src/test/java/org/opensearch/identity/shiro/AuthTokenHandlerTests.java b/plugins/identity-shiro/src/test/java/org/opensearch/identity/shiro/AuthTokenHandlerTests.java index db77ced298991..f99484083e2fb 100644 --- a/plugins/identity-shiro/src/test/java/org/opensearch/identity/shiro/AuthTokenHandlerTests.java +++ b/plugins/identity-shiro/src/test/java/org/opensearch/identity/shiro/AuthTokenHandlerTests.java @@ -100,11 +100,6 @@ public void testShouldFailGetTokenInfo() { assertThrows(UnsupportedAuthenticationToken.class, () -> shiroAuthTokenHandler.getTokenInfo(bearerAuthToken)); } - public void testShouldFailValidateToken() { - final BearerAuthToken bearerAuthToken = new BearerAuthToken("header.payload.signature"); - assertFalse(shiroAuthTokenHandler.validateToken(bearerAuthToken)); - } - public void testShoudPassMapLookupWithToken() { final BasicAuthToken authToken = new BasicAuthToken("Basic dGVzdDp0ZTpzdA=="); shiroAuthTokenHandler.getShiroTokenPasswordMap().put(authToken, "te:st"); diff --git a/server/src/main/java/org/opensearch/identity/noop/NoopTokenManager.java b/server/src/main/java/org/opensearch/identity/noop/NoopTokenManager.java index 1dc3a58916b5c..fa6643b7447dc 100644 --- a/server/src/main/java/org/opensearch/identity/noop/NoopTokenManager.java +++ b/server/src/main/java/org/opensearch/identity/noop/NoopTokenManager.java @@ -50,9 +50,4 @@ public String asAuthHeaderValue() { } }; } - - @Override - public Subject authenticateToken(AuthToken authToken) { - return null; - } } diff --git a/server/src/main/java/org/opensearch/identity/tokens/TokenManager.java b/server/src/main/java/org/opensearch/identity/tokens/TokenManager.java index b4048251a06a2..972a9a1080955 100644 --- a/server/src/main/java/org/opensearch/identity/tokens/TokenManager.java +++ b/server/src/main/java/org/opensearch/identity/tokens/TokenManager.java @@ -30,11 +30,4 @@ public interface TokenManager { * @return a new auth token */ public AuthToken issueServiceAccountToken(final String audience); - - /** - * Authenticates a provided authToken - * @param authToken: The authToken to authenticate - * @return The authenticated subject - */ - public Subject authenticateToken(AuthToken authToken); }
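[With authenticateToken removed, TokenManager is purely an issuer; verifying a presented token is now the concern of the identity plugin's own authentication path (for example the Shiro realm behind ShiroTokenManager above). A caller-side sketch under that reduced contract; the audience string is illustrative, and asAuthHeaderValue() is assumed from the NoopTokenManager snippet above:

    // Issue a token for a service account; nothing on TokenManager
    // authenticates presented tokens anymore.
    AuthToken token = tokenManager.issueServiceAccountToken("extension-1");
    String headerValue = token.asAuthHeaderValue(); // e.g. sent as an Authorization header
]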