From 784a473b3d3891e41afca3588dd3eda7a7bd3def Mon Sep 17 00:00:00 2001 From: Shourya Dutta Biswas <114977491+shourya035@users.noreply.github.com> Date: Mon, 21 Aug 2023 11:00:27 +0530 Subject: [PATCH 01/30] [Remote Store] Add cumulative bytes lag to NodesStats (#9393) --------- Signed-off-by: Shourya Dutta Biswas <114977491+shourya035@users.noreply.github.com> --- CHANGELOG.md | 2 +- .../indices/stats/IndexStatsIT.java | 1 + .../RemoteSegmentStatsFromNodesStatsIT.java | 12 ++- .../index/remote/RemoteSegmentStats.java | 87 +++++++++++++++---- .../cluster/node/stats/NodeStatsTests.java | 4 +- 5 files changed, 86 insertions(+), 20 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index efc19b5b77af9..e859874d1cbf1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -48,7 +48,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Change http code on create index API with bad input raising NotXContentException from 500 to 400 ([#4773](https://github.com/opensearch-project/OpenSearch/pull/4773)) - Improve summary error message for invalid setting updates ([#4792](https://github.com/opensearch-project/OpenSearch/pull/4792)) - [Remote Store] Add Segment download stats to remotestore stats API ([#8718](https://github.com/opensearch-project/OpenSearch/pull/8718)) -- [Remote Store] Add remote segment transfer stats on NodesStats API ([#9168](https://github.com/opensearch-project/OpenSearch/pull/9168)) +- [Remote Store] Add remote segment transfer stats on NodesStats API ([#9168](https://github.com/opensearch-project/OpenSearch/pull/9168) [#9393](https://github.com/opensearch-project/OpenSearch/pull/9393)) - Return 409 Conflict HTTP status instead of 503 on failure to concurrently execute snapshots ([#8986](https://github.com/opensearch-project/OpenSearch/pull/5855)) ### Deprecated diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java index ef3c2c1235a3c..af5191d7d2039 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java @@ -1454,6 +1454,7 @@ private void assertZeroRemoteSegmentStats(RemoteSegmentStats remoteSegmentStats) assertEquals(0, remoteSegmentStats.getDownloadBytesStarted()); assertEquals(0, remoteSegmentStats.getDownloadBytesSucceeded()); assertEquals(0, remoteSegmentStats.getDownloadBytesFailed()); + assertEquals(0, remoteSegmentStats.getTotalRefreshBytesLag()); assertEquals(0, remoteSegmentStats.getMaxRefreshBytesLag()); assertEquals(0, remoteSegmentStats.getMaxRefreshTimeLag()); } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteSegmentStatsFromNodesStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteSegmentStatsFromNodesStatsIT.java index 71a174e300fe8..19ad43b503ab7 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteSegmentStatsFromNodesStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteSegmentStatsFromNodesStatsIT.java @@ -67,7 +67,7 @@ public void testNodesStatsParityWithOnlyPrimaryShards() { indexSingleDoc(secondIndex, true); long cumulativeUploadsSucceeded = 0, cumulativeUploadsStarted = 0, cumulativeUploadsFailed = 0; - long max_bytes_lag = 0, max_time_lag = 0; + long total_bytes_lag = 0, max_bytes_lag = 0, max_time_lag = 0; // Fetch upload stats 
RemoteStoreStatsResponse remoteStoreStatsFirstIndex = client(randomDataNode).admin() .cluster() @@ -77,6 +77,7 @@ public void testNodesStatsParityWithOnlyPrimaryShards() { cumulativeUploadsSucceeded += remoteStoreStatsFirstIndex.getRemoteStoreStats()[0].getSegmentStats().uploadBytesSucceeded; cumulativeUploadsStarted += remoteStoreStatsFirstIndex.getRemoteStoreStats()[0].getSegmentStats().uploadBytesStarted; cumulativeUploadsFailed += remoteStoreStatsFirstIndex.getRemoteStoreStats()[0].getSegmentStats().uploadBytesFailed; + total_bytes_lag += remoteStoreStatsFirstIndex.getRemoteStoreStats()[0].getSegmentStats().bytesLag; max_bytes_lag = Math.max(max_bytes_lag, remoteStoreStatsFirstIndex.getRemoteStoreStats()[0].getSegmentStats().bytesLag); max_time_lag = Math.max(max_time_lag, remoteStoreStatsFirstIndex.getRemoteStoreStats()[0].getSegmentStats().refreshTimeLagMs); @@ -85,9 +86,11 @@ public void testNodesStatsParityWithOnlyPrimaryShards() { .prepareRemoteStoreStats(secondIndex, "0") .setLocal(true) .get(); + cumulativeUploadsSucceeded += remoteStoreStatsSecondIndex.getRemoteStoreStats()[0].getSegmentStats().uploadBytesSucceeded; cumulativeUploadsStarted += remoteStoreStatsSecondIndex.getRemoteStoreStats()[0].getSegmentStats().uploadBytesStarted; cumulativeUploadsFailed += remoteStoreStatsSecondIndex.getRemoteStoreStats()[0].getSegmentStats().uploadBytesFailed; + total_bytes_lag += remoteStoreStatsSecondIndex.getRemoteStoreStats()[0].getSegmentStats().bytesLag; max_bytes_lag = Math.max(max_bytes_lag, remoteStoreStatsSecondIndex.getRemoteStoreStats()[0].getSegmentStats().bytesLag); max_time_lag = Math.max(max_time_lag, remoteStoreStatsSecondIndex.getRemoteStoreStats()[0].getSegmentStats().refreshTimeLagMs); @@ -101,6 +104,7 @@ public void testNodesStatsParityWithOnlyPrimaryShards() { assertEquals(cumulativeUploadsSucceeded, remoteSegmentStats.getUploadBytesSucceeded()); assertEquals(cumulativeUploadsStarted, remoteSegmentStats.getUploadBytesStarted()); assertEquals(cumulativeUploadsFailed, remoteSegmentStats.getUploadBytesFailed()); + assertEquals(total_bytes_lag, remoteSegmentStats.getTotalRefreshBytesLag()); assertEquals(max_bytes_lag, remoteSegmentStats.getMaxRefreshBytesLag()); assertEquals(max_time_lag, remoteSegmentStats.getMaxRefreshTimeLag()); } @@ -173,6 +177,7 @@ private void assertZeroRemoteSegmentStats(RemoteSegmentStats remoteSegmentStats) assertEquals(0, remoteSegmentStats.getDownloadBytesStarted()); assertEquals(0, remoteSegmentStats.getDownloadBytesSucceeded()); assertEquals(0, remoteSegmentStats.getDownloadBytesFailed()); + assertEquals(0, remoteSegmentStats.getTotalRefreshBytesLag()); assertEquals(0, remoteSegmentStats.getMaxRefreshBytesLag()); assertEquals(0, remoteSegmentStats.getMaxRefreshTimeLag()); } @@ -181,7 +186,7 @@ private static void assertNodeStatsParityAcrossNodes(String firstIndex, String s for (String dataNode : internalCluster().getDataNodeNames()) { long cumulativeUploadsSucceeded = 0, cumulativeUploadsStarted = 0, cumulativeUploadsFailed = 0; long cumulativeDownloadsSucceeded = 0, cumulativeDownloadsStarted = 0, cumulativeDownloadsFailed = 0; - long max_bytes_lag = 0, max_time_lag = 0; + long total_bytes_lag = 0, max_bytes_lag = 0, max_time_lag = 0; // Fetch upload stats RemoteStoreStatsResponse remoteStoreStatsFirstIndex = client(dataNode).admin() .cluster() @@ -197,6 +202,7 @@ private static void assertNodeStatsParityAcrossNodes(String firstIndex, String s .getSegmentStats().directoryFileTransferTrackerStats.transferredBytesStarted; 
cumulativeDownloadsFailed += remoteStoreStatsFirstIndex.getRemoteStoreStats()[0] .getSegmentStats().directoryFileTransferTrackerStats.transferredBytesFailed; + total_bytes_lag += remoteStoreStatsFirstIndex.getRemoteStoreStats()[0].getSegmentStats().bytesLag; max_bytes_lag = Math.max(max_bytes_lag, remoteStoreStatsFirstIndex.getRemoteStoreStats()[0].getSegmentStats().bytesLag); max_time_lag = Math.max(max_time_lag, remoteStoreStatsFirstIndex.getRemoteStoreStats()[0].getSegmentStats().refreshTimeLagMs); @@ -214,6 +220,7 @@ private static void assertNodeStatsParityAcrossNodes(String firstIndex, String s .getSegmentStats().directoryFileTransferTrackerStats.transferredBytesStarted; cumulativeDownloadsFailed += remoteStoreStatsSecondIndex.getRemoteStoreStats()[0] .getSegmentStats().directoryFileTransferTrackerStats.transferredBytesFailed; + total_bytes_lag += remoteStoreStatsSecondIndex.getRemoteStoreStats()[0].getSegmentStats().bytesLag; max_bytes_lag = Math.max(max_bytes_lag, remoteStoreStatsSecondIndex.getRemoteStoreStats()[0].getSegmentStats().bytesLag); max_time_lag = Math.max(max_time_lag, remoteStoreStatsSecondIndex.getRemoteStoreStats()[0].getSegmentStats().refreshTimeLagMs); @@ -230,6 +237,7 @@ private static void assertNodeStatsParityAcrossNodes(String firstIndex, String s assertEquals(cumulativeDownloadsSucceeded, remoteSegmentStats.getDownloadBytesSucceeded()); assertEquals(cumulativeDownloadsStarted, remoteSegmentStats.getDownloadBytesStarted()); assertEquals(cumulativeDownloadsFailed, remoteSegmentStats.getDownloadBytesFailed()); + assertEquals(total_bytes_lag, remoteSegmentStats.getTotalRefreshBytesLag()); assertEquals(max_bytes_lag, remoteSegmentStats.getMaxRefreshBytesLag()); assertEquals(max_time_lag, remoteSegmentStats.getMaxRefreshTimeLag()); } diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteSegmentStats.java b/server/src/main/java/org/opensearch/index/remote/RemoteSegmentStats.java index f834f4ad9583d..0ff61d49c00f8 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteSegmentStats.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteSegmentStats.java @@ -8,6 +8,7 @@ package org.opensearch.index.remote; +import org.opensearch.Version; import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStats; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; @@ -61,6 +62,11 @@ public class RemoteSegmentStats implements Writeable, ToXContentFragment { * Used to check for data freshness in the remote store */ private long maxRefreshBytesLag; + /** + * Total refresh lag (in bytes) between local and the remote store + * Used to check for data freshness in the remote store + */ + private long totalRefreshBytesLag; public RemoteSegmentStats() {} @@ -73,6 +79,19 @@ public RemoteSegmentStats(StreamInput in) throws IOException { downloadBytesSucceeded = in.readLong(); maxRefreshTimeLag = in.readLong(); maxRefreshBytesLag = in.readLong(); + /* TODO: + Adding version checks here since the base PR of adding remote store stats + in SegmentStats has already been merged and backported to 2.x branch. + + Since this is a new field that is being added, we need to have this check in place + to ensure BWCs don't break. + + This would have to be removed after the new field addition PRs are also backported to 2.x. 
+ If possible we would need to ensure that all field addition PRs are backported at once + */ + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + totalRefreshBytesLag = in.readLong(); + } } /** @@ -91,7 +110,11 @@ public RemoteSegmentStats(RemoteSegmentTransferTracker.Stats trackerStats) { this.downloadBytesStarted = trackerStats.directoryFileTransferTrackerStats.transferredBytesStarted; this.downloadBytesFailed = trackerStats.directoryFileTransferTrackerStats.transferredBytesFailed; this.maxRefreshTimeLag = trackerStats.refreshTimeLagMs; + // Initializing both total and max bytes lag to the same `bytesLag` + // value from the tracker object + // Aggregations would be performed on the add method this.maxRefreshBytesLag = trackerStats.bytesLag; + this.totalRefreshBytesLag = trackerStats.bytesLag; } // Getter and setters. All are visible for testing @@ -155,8 +178,16 @@ public long getMaxRefreshBytesLag() { return maxRefreshBytesLag; } - public void setMaxRefreshBytesLag(long maxRefreshBytesLag) { - this.maxRefreshBytesLag = maxRefreshBytesLag; + public void addMaxRefreshBytesLag(long maxRefreshBytesLag) { + this.maxRefreshBytesLag = Math.max(this.maxRefreshBytesLag, maxRefreshBytesLag); + } + + public long getTotalRefreshBytesLag() { + return totalRefreshBytesLag; + } + + public void addTotalRefreshBytesLag(long totalRefreshBytesLag) { + this.totalRefreshBytesLag += totalRefreshBytesLag; } /** @@ -174,6 +205,7 @@ public void add(RemoteSegmentStats existingStats) { this.downloadBytesSucceeded += existingStats.getDownloadBytesSucceeded(); this.maxRefreshTimeLag = Math.max(this.maxRefreshTimeLag, existingStats.getMaxRefreshTimeLag()); this.maxRefreshBytesLag = Math.max(this.maxRefreshBytesLag, existingStats.getMaxRefreshBytesLag()); + this.totalRefreshBytesLag += existingStats.getTotalRefreshBytesLag(); } } @@ -187,33 +219,53 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(downloadBytesSucceeded); out.writeLong(maxRefreshTimeLag); out.writeLong(maxRefreshBytesLag); + /* TODO: + Adding version checks here since the base PR of adding remote store stats + in SegmentStats has already been merged and backported to 2.x branch. + + Since this is a new field that is being added, we need to have this check in place + to ensure BWCs don't break. + + This would have to be removed after the new field addition PRs are also backported to 2.x. 
+ If possible we would need to ensure that all field addition PRs are backported at once + */ + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + out.writeLong(totalRefreshBytesLag); + } } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(Fields.REMOTE_STORE); builder.startObject(Fields.UPLOAD); + buildUploadStats(builder); + builder.endObject(); + builder.startObject(Fields.DOWNLOAD); + buildDownloadStats(builder); + builder.endObject(); + builder.endObject(); + return builder; + } + + private void buildUploadStats(XContentBuilder builder) throws IOException { builder.startObject(Fields.TOTAL_UPLOADS); builder.humanReadableField(Fields.STARTED_BYTES, Fields.STARTED, new ByteSizeValue(uploadBytesStarted)); builder.humanReadableField(Fields.SUCCEEDED_BYTES, Fields.SUCCEEDED, new ByteSizeValue(uploadBytesSucceeded)); builder.humanReadableField(Fields.FAILED_BYTES, Fields.FAILED, new ByteSizeValue(uploadBytesFailed)); builder.endObject(); - builder.humanReadableField(Fields.MAX_REFRESH_TIME_LAG_IN_MILLIS, Fields.MAX_REFRESH_TIME_LAG, new TimeValue(maxRefreshTimeLag)); - builder.humanReadableField( - Fields.MAX_REFRESH_SIZE_LAG_IN_MILLIS, - Fields.MAX_REFRESH_SIZE_LAG, - new ByteSizeValue(maxRefreshBytesLag) - ); + builder.startObject(Fields.REFRESH_SIZE_LAG); + builder.humanReadableField(Fields.TOTAL_BYTES, Fields.TOTAL, new ByteSizeValue(totalRefreshBytesLag)); + builder.humanReadableField(Fields.MAX_BYTES, Fields.MAX, new ByteSizeValue(maxRefreshBytesLag)); builder.endObject(); - builder.startObject(Fields.DOWNLOAD); + builder.humanReadableField(Fields.MAX_REFRESH_TIME_LAG_IN_MILLIS, Fields.MAX_REFRESH_TIME_LAG, new TimeValue(maxRefreshTimeLag)); + } + + private void buildDownloadStats(XContentBuilder builder) throws IOException { builder.startObject(Fields.TOTAL_DOWNLOADS); builder.humanReadableField(Fields.STARTED_BYTES, Fields.STARTED, new ByteSizeValue(downloadBytesStarted)); builder.humanReadableField(Fields.SUCCEEDED_BYTES, Fields.SUCCEEDED, new ByteSizeValue(downloadBytesSucceeded)); builder.humanReadableField(Fields.FAILED_BYTES, Fields.FAILED, new ByteSizeValue(downloadBytesFailed)); builder.endObject(); - builder.endObject(); - builder.endObject(); - return builder; } static final class Fields { @@ -222,15 +274,18 @@ static final class Fields { static final String DOWNLOAD = "download"; static final String TOTAL_UPLOADS = "total_uploads"; static final String TOTAL_DOWNLOADS = "total_downloads"; + static final String MAX_REFRESH_TIME_LAG = "max_refresh_time_lag"; + static final String MAX_REFRESH_TIME_LAG_IN_MILLIS = "max_refresh_time_lag_in_millis"; + static final String REFRESH_SIZE_LAG = "refresh_size_lag"; static final String STARTED = "started"; static final String STARTED_BYTES = "started_bytes"; static final String FAILED = "failed"; static final String FAILED_BYTES = "failed_bytes"; static final String SUCCEEDED = "succeeded"; static final String SUCCEEDED_BYTES = "succeeded_bytes"; - static final String MAX_REFRESH_TIME_LAG = "max_refresh_time_lag"; - static final String MAX_REFRESH_TIME_LAG_IN_MILLIS = "max_refresh_time_lag_in_millis"; - static final String MAX_REFRESH_SIZE_LAG = "max_refresh_size_lag"; - static final String MAX_REFRESH_SIZE_LAG_IN_MILLIS = "max_refresh_size_lag_in_bytes"; + static final String TOTAL = "total"; + static final String TOTAL_BYTES = "total_bytes"; + static final String MAX = "max"; + static final String MAX_BYTES = "max_bytes"; } } diff --git 
a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java index e6460e429bd42..fbe70748adf2d 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java @@ -459,6 +459,7 @@ public void testSerialization() throws IOException { assertEquals(remoteSegmentStats.getUploadBytesFailed(), deserializedRemoteSegmentStats.getUploadBytesFailed()); assertEquals(remoteSegmentStats.getMaxRefreshTimeLag(), deserializedRemoteSegmentStats.getMaxRefreshTimeLag()); assertEquals(remoteSegmentStats.getMaxRefreshBytesLag(), deserializedRemoteSegmentStats.getMaxRefreshBytesLag()); + assertEquals(remoteSegmentStats.getTotalRefreshBytesLag(), deserializedRemoteSegmentStats.getTotalRefreshBytesLag()); } } } @@ -789,7 +790,8 @@ private static NodeIndicesStats getNodeIndicesStats(boolean remoteStoreStats) { remoteSegmentStats.addDownloadBytesStarted(10L); remoteSegmentStats.addDownloadBytesSucceeded(10L); remoteSegmentStats.addDownloadBytesFailed(1L); - remoteSegmentStats.setMaxRefreshBytesLag(5L); + remoteSegmentStats.addTotalRefreshBytesLag(5L); + remoteSegmentStats.addMaxRefreshBytesLag(2L); remoteSegmentStats.setMaxRefreshTimeLag(2L); } return indicesStats; From 61c5f173b9b6dba7de84423be5a9db283e25291b Mon Sep 17 00:00:00 2001 From: Poojita Raj Date: Mon, 21 Aug 2023 08:53:48 -0700 Subject: [PATCH 02/30] Fix failing test in ShardMovementStrategyTests (#9420) Signed-off-by: Poojita Raj --- .../allocator/BalancedShardsAllocator.java | 18 +++++++++++---- .../allocator/LocalShardsBalancer.java | 22 +------------------ .../routing/ShardMovementStrategyTests.java | 11 +++++----- 3 files changed, 20 insertions(+), 31 deletions(-) diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index 19e0e318eb805..90eff50fd9b5d 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -169,12 +169,25 @@ public BalancedShardsAllocator(Settings settings, ClusterSettings clusterSetting clusterSettings.addSettingsUpdateConsumer(THRESHOLD_SETTING, this::setThreshold); } + /** + * Changes in deprecated setting SHARD_MOVE_PRIMARY_FIRST_SETTING affect value of its replacement setting SHARD_MOVEMENT_STRATEGY_SETTING. + */ private void setMovePrimaryFirst(boolean movePrimaryFirst) { this.movePrimaryFirst = movePrimaryFirst; + setShardMovementStrategy(this.shardMovementStrategy); } + /** + * Sets the correct Shard movement strategy to use. + * If users are still using deprecated setting `move_primary_first`, we want behavior to remain unchanged. + * In the event of changing ShardMovementStrategy setting from default setting NO_PREFERENCE to either PRIMARY_FIRST or REPLICA_FIRST, we want that + * to have priority over values set in move_primary_first setting. 
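+     * Note: setMovePrimaryFirst re-invokes this method, so a dynamic update to the deprecated flag is immediately reflected in the effective strategy.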
+ */ private void setShardMovementStrategy(ShardMovementStrategy shardMovementStrategy) { this.shardMovementStrategy = shardMovementStrategy; + if (shardMovementStrategy == ShardMovementStrategy.NO_PREFERENCE && this.movePrimaryFirst) { + this.shardMovementStrategy = ShardMovementStrategy.PRIMARY_FIRST; + } } private void setWeightFunction(float indexBalance, float shardBalanceFactor) { @@ -205,7 +218,6 @@ public void allocate(RoutingAllocation allocation) { final ShardsBalancer localShardsBalancer = new LocalShardsBalancer( logger, allocation, - movePrimaryFirst, shardMovementStrategy, weightFunction, threshold, @@ -227,7 +239,6 @@ public ShardAllocationDecision decideShardAllocation(final ShardRouting shard, f ShardsBalancer localShardsBalancer = new LocalShardsBalancer( logger, allocation, - movePrimaryFirst, shardMovementStrategy, weightFunction, threshold, @@ -479,13 +490,12 @@ public static class Balancer extends LocalShardsBalancer { public Balancer( Logger logger, RoutingAllocation allocation, - boolean movePrimaryFirst, ShardMovementStrategy shardMovementStrategy, BalancedShardsAllocator.WeightFunction weight, float threshold, boolean preferPrimaryBalance ) { - super(logger, allocation, movePrimaryFirst, shardMovementStrategy, weight, threshold, preferPrimaryBalance); + super(logger, allocation, shardMovementStrategy, weight, threshold, preferPrimaryBalance); } } diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java index e1e6b696e3ad2..3365b58d92a63 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java @@ -58,7 +58,6 @@ public class LocalShardsBalancer extends ShardsBalancer { private final Map nodes; private final RoutingAllocation allocation; private final RoutingNodes routingNodes; - private final boolean movePrimaryFirst; private final ShardMovementStrategy shardMovementStrategy; private final boolean preferPrimaryBalance; @@ -75,7 +74,6 @@ public class LocalShardsBalancer extends ShardsBalancer { public LocalShardsBalancer( Logger logger, RoutingAllocation allocation, - boolean movePrimaryFirst, ShardMovementStrategy shardMovementStrategy, BalancedShardsAllocator.WeightFunction weight, float threshold, @@ -83,7 +81,6 @@ public LocalShardsBalancer( ) { this.logger = logger; this.allocation = allocation; - this.movePrimaryFirst = movePrimaryFirst; this.weight = weight; this.threshold = threshold; this.routingNodes = allocation.routingNodes(); @@ -531,22 +528,6 @@ private void checkAndAddInEligibleTargetNode(RoutingNode targetNode) { } } - /** - * Returns the correct Shard movement strategy to use. - * If users are still using deprecated setting "move_primary_first", we want behavior to remain unchanged. - * In the event of changing ShardMovementStrategy setting from default setting NO_PREFERENCE to either PRIMARY_FIRST or REPLICA_FIRST, we want that - * to have priority over values set in move_primary_first setting. 
- */ - private ShardMovementStrategy getShardMovementStrategy() { - if (shardMovementStrategy != ShardMovementStrategy.NO_PREFERENCE) { - return shardMovementStrategy; - } - if (movePrimaryFirst) { - return ShardMovementStrategy.PRIMARY_FIRST; - } - return ShardMovementStrategy.NO_PREFERENCE; - } - /** * Move started shards that can not be allocated to a node anymore * @@ -569,8 +550,7 @@ void moveShards() { checkAndAddInEligibleTargetNode(currentNode.getRoutingNode()); } boolean primariesThrottled = false; - for (Iterator it = allocation.routingNodes().nodeInterleavedShardIterator(getShardMovementStrategy()); it - .hasNext();) { + for (Iterator it = allocation.routingNodes().nodeInterleavedShardIterator(shardMovementStrategy); it.hasNext();) { // Verify if the cluster concurrent recoveries have been reached. if (allocation.deciders().canMoveAnyShard(allocation).type() != Decision.Type.YES) { logger.info( diff --git a/server/src/test/java/org/opensearch/cluster/routing/ShardMovementStrategyTests.java b/server/src/test/java/org/opensearch/cluster/routing/ShardMovementStrategyTests.java index d9f1e652f0b0a..7483e69fb0b0e 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/ShardMovementStrategyTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/ShardMovementStrategyTests.java @@ -55,24 +55,23 @@ private static Settings.Builder getSettings(ShardMovementStrategy shardMovementS .put("cluster.routing.allocation.move.primary_first", movePrimaryFirst); } - public void testClusterGreenAfterPartialRelocationPrimaryFirstShardMovementMovePrimarySettingEnabled() throws InterruptedException { + public void testClusterRelocationPrimaryFirstShardMovementMovePrimarySettingEnabled() throws InterruptedException { testClusterGreenAfterPartialRelocation(ShardMovementStrategy.PRIMARY_FIRST, true); } - public void testClusterGreenAfterPartialRelocationPrimaryFirstShardMovementMovePrimarySettingDisabled() throws InterruptedException { + public void testClusterRelocationPrimaryFirstShardMovementMovePrimarySettingDisabled() throws InterruptedException { testClusterGreenAfterPartialRelocation(ShardMovementStrategy.PRIMARY_FIRST, false); } - public void testClusterGreenAfterPartialRelocationReplicaFirstShardMovementPrimaryFirstEnabled() throws InterruptedException { + public void testClusterRelocationReplicaFirstShardMovementPrimaryFirstEnabled() throws InterruptedException { testClusterGreenAfterPartialRelocation(ShardMovementStrategy.REPLICA_FIRST, true); } - public void testClusterGreenAfterPartialRelocationReplicaFirstShardMovementPrimaryFirstDisabled() throws InterruptedException { + public void testClusterRelocationReplicaFirstShardMovementPrimaryFirstDisabled() throws InterruptedException { testClusterGreenAfterPartialRelocation(ShardMovementStrategy.REPLICA_FIRST, false); } - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9178") - public void testClusterGreenAfterPartialRelocationNoPreferenceShardMovementPrimaryFirstEnabled() throws InterruptedException { + public void testClusterRelocationNoPreferenceShardMovementPrimaryFirstEnabled() throws InterruptedException { testClusterGreenAfterPartialRelocation(ShardMovementStrategy.NO_PREFERENCE, true); } From 48d408733be9f184391cd724974d942191488a16 Mon Sep 17 00:00:00 2001 From: Sachin Kale Date: Tue, 22 Aug 2023 03:53:05 +0000 Subject: [PATCH 03/30] Bugfix: remove assert on non-empty translog reader list (#9458) --------- Signed-off-by: Sachin Kale Co-authored-by: Sachin Kale --- 
.../java/org/opensearch/index/translog/RemoteFsTranslog.java | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java index 75b0f43c637ef..b23374a2cce3b 100644 --- a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java +++ b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java @@ -426,8 +426,11 @@ private void deleteStaleRemotePrimaryTerms() { // are older files that are no longer needed and should be cleaned up. In here, we delete all files that are part // of older primary term. if (olderPrimaryCleaned.trySet(Boolean.TRUE)) { + if (readers.isEmpty()) { + logger.trace("Translog reader list is empty, returning from deleteStaleRemotePrimaryTerms"); + return; + } // First we delete all stale primary terms folders from remote store - assert readers.isEmpty() == false : shardId + " Expected non-empty readers"; long minimumReferencedPrimaryTerm = readers.stream().map(BaseTranslogReader::getPrimaryTerm).min(Long::compare).get(); translogTransferManager.deletePrimaryTermsAsync(minimumReferencedPrimaryTerm); } From 9fb86a5d647635d030065f856fb5b4d95bec1097 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 21 Aug 2023 22:16:43 -0700 Subject: [PATCH 04/30] Bump actions/setup-java from 2 to 3 (#9457) * Bump actions/setup-java from 2 to 3 Bumps [actions/setup-java](https://github.com/actions/setup-java) from 2 to 3. - [Release notes](https://github.com/actions/setup-java/releases) - [Commits](https://github.com/actions/setup-java/compare/v2...v3) --- updated-dependencies: - dependency-name: actions/setup-java dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- .github/workflows/lucene-snapshots.yml | 2 +- CHANGELOG.md | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/lucene-snapshots.yml b/.github/workflows/lucene-snapshots.yml index 2ef476c190ed8..994b420cb5847 100644 --- a/.github/workflows/lucene-snapshots.yml +++ b/.github/workflows/lucene-snapshots.yml @@ -23,7 +23,7 @@ jobs: steps: - uses: actions/checkout@v2 - name: Set up JDK 17 - uses: actions/setup-java@v2 + uses: actions/setup-java@v3 with: java-version: '17' distribution: 'adopt' diff --git a/CHANGELOG.md b/CHANGELOG.md index e859874d1cbf1..63ab292192e64 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -117,6 +117,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `snakeyaml` from 2.0 to 2.1 ([#9269](https://github.com/opensearch-project/OpenSearch/pull/9269)) - Bump `aws-actions/configure-aws-credentials` from 1 to 2 ([#9302](https://github.com/opensearch-project/OpenSearch/pull/9302)) - Bump `com.github.luben:zstd-jni` from 1.5.5-3 to 1.5.5-5 ([#9431](https://github.com/opensearch-project/OpenSearch/pull/9431) +- Bump `actions/setup-java` from 2 to 3 ([#9457](https://github.com/opensearch-project/OpenSearch/pull/9457)) ### Changed - Default to mmapfs within hybridfs ([#8508](https://github.com/opensearch-project/OpenSearch/pull/8508)) @@ -161,4 +162,4 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Security [Unreleased 3.0]: https://github.com/opensearch-project/OpenSearch/compare/2.x...HEAD -[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.10...2.x +[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.10...2.x \ No newline at end of file From dd75a225de2b9401b9f6413343c587004fc7a409 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 21 Aug 2023 23:14:34 -0700 Subject: [PATCH 05/30] Bump org.apache.commons:commons-lang3 from 3.12.0 to 3.13.0 in /plugins/repository-azure (#9298) Bumps org.apache.commons:commons-lang3 from 3.12.0 to 3.13.0. 
--- updated-dependencies: - dependency-name: org.apache.commons:commons-lang3 - dependency-type: direct:production - update-type: version-update:semver-minor Signed-off-by: dependabot[bot] --- CHANGELOG.md | 2 +- plugins/repository-azure/build.gradle | 2 +- plugins/repository-azure/licenses/commons-lang3-3.12.0.jar.sha1 | 1 - plugins/repository-azure/licenses/commons-lang3-3.13.0.jar.sha1 | 1 + 4 files changed, 3 insertions(+), 3 deletions(-) delete mode 100644 plugins/repository-azure/licenses/commons-lang3-3.12.0.jar.sha1 create mode 100644 plugins/repository-azure/licenses/commons-lang3-3.13.0.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 63ab292192e64..06b47e7453b3c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -100,7 +100,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - OpenJDK Update (July 2023 Patch releases) ([#8868](https://github.com/opensearch-project/OpenSearch/pull/8868) - Bump `hadoop` libraries from 3.3.4 to 3.3.6 ([#6995](https://github.com/opensearch-project/OpenSearch/pull/6995)) - Bump `com.gradle.enterprise` from 3.13.3 to 3.14.1 ([#8996](https://github.com/opensearch-project/OpenSearch/pull/8996)) -- Bump `org.apache.commons:commons-lang3` from 3.12.0 to 3.13.0 ([#8995](https://github.com/opensearch-project/OpenSearch/pull/8995)) +- Bump `org.apache.commons:commons-lang3` from 3.12.0 to 3.13.0 ([#8995](https://github.com/opensearch-project/OpenSearch/pull/8995), [#9298](https://github.com/opensearch-project/OpenSearch/pull/9298)) - Bump `com.google.cloud:google-cloud-core-http` from 2.21.0 to 2.21.1 ([#8999](https://github.com/opensearch-project/OpenSearch/pull/8999)) - Bump `com.maxmind.geoip2:geoip2` from 4.0.1 to 4.1.0 ([#8998](https://github.com/opensearch-project/OpenSearch/pull/8998)) - Bump `org.apache.commons:commons-lang3` from 3.12.0 to 3.13.0 in /plugins/repository-hdfs ([#8997](https://github.com/opensearch-project/OpenSearch/pull/8997)) diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 7fd0a57189d7f..9da2e58c55271 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -70,7 +70,7 @@ dependencies { api 'org.codehaus.woodstox:stax2-api:4.2.1' implementation "com.fasterxml.woodstox:woodstox-core:${versions.woodstox}" runtimeOnly "com.google.guava:guava:${versions.guava}" - api 'org.apache.commons:commons-lang3:3.12.0' + api 'org.apache.commons:commons-lang3:3.13.0' testImplementation project(':test:fixtures:azure-fixture') } diff --git a/plugins/repository-azure/licenses/commons-lang3-3.12.0.jar.sha1 b/plugins/repository-azure/licenses/commons-lang3-3.12.0.jar.sha1 deleted file mode 100644 index 9273d8c01aaba..0000000000000 --- a/plugins/repository-azure/licenses/commons-lang3-3.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c6842c86792ff03b9f1d1fe2aab8dc23aa6c6f0e \ No newline at end of file diff --git a/plugins/repository-azure/licenses/commons-lang3-3.13.0.jar.sha1 b/plugins/repository-azure/licenses/commons-lang3-3.13.0.jar.sha1 new file mode 100644 index 0000000000000..d0c2f2486ee1f --- /dev/null +++ b/plugins/repository-azure/licenses/commons-lang3-3.13.0.jar.sha1 @@ -0,0 +1 @@ +b7263237aa89c1f99b327197c41d0669707a462e \ No newline at end of file From ebdffbb2bf057096028aa3e6321be8a2ebc08ec4 Mon Sep 17 00:00:00 2001 From: Ankit Kala Date: Tue, 22 Aug 2023 18:16:26 +0530 Subject: [PATCH 06/30] [BUG] Add support to clear archived index settings (#9019) * Add support to clear archived index setting Signed-off-by: Ankit 
Kala --- CHANGELOG.md | 1 + .../settings/ArchivedIndexSettingsIT.java | 129 ++++++++++++++++++ .../MetadataUpdateSettingsService.java | 10 +- .../common/settings/IndexScopedSettings.java | 2 + .../opensearch/common/settings/Settings.java | 5 +- .../org/opensearch/index/IndexSettings.java | 4 +- .../common/settings/SettingsTests.java | 14 ++ 7 files changed, 161 insertions(+), 4 deletions(-) create mode 100644 server/src/internalClusterTest/java/org/opensearch/indices/settings/ArchivedIndexSettingsIT.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 06b47e7453b3c..cf70236502702 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -72,6 +72,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Fixed - Fix 'org.apache.hc.core5.http.ParseException: Invalid protocol version' under JDK 16+ ([#4827](https://github.com/opensearch-project/OpenSearch/pull/4827)) - Fix compression support for h2c protocol ([#4944](https://github.com/opensearch-project/OpenSearch/pull/4944)) +- Add support to clear archived index setting ([#9019](https://github.com/opensearch-project/OpenSearch/pull/9019)) ### Security diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/settings/ArchivedIndexSettingsIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/settings/ArchivedIndexSettingsIT.java new file mode 100644 index 0000000000000..20b0a6175c562 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/indices/settings/ArchivedIndexSettingsIT.java @@ -0,0 +1,129 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.settings; + +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.plugins.Plugin; +import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +import static org.hamcrest.Matchers.startsWith; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0, supportsDedicatedMasters = false) +public class ArchivedIndexSettingsIT extends OpenSearchIntegTestCase { + private volatile boolean installPlugin; + + public void testArchiveSettings() throws Exception { + installPlugin = true; + // Set up the cluster with an index containing a dummy setting (owned by a dummy plugin) + String oldClusterManagerNode = internalCluster().startClusterManagerOnlyNode(); + String oldDataNode = internalCluster().startDataOnlyNode(); + assertEquals(2, internalCluster().numDataAndClusterManagerNodes()); + createIndex("test"); + ensureYellow(); + // Add a dummy setting + client().admin() + .indices() + .prepareUpdateSettings("test") + .setSettings(Settings.builder().put("index.dummy", "foobar").put("index.dummy2", "foobar")) + .execute() + .actionGet(); + + // Remove dummy plugin and replace the cluster manager node so that the stale plugin setting moves to "archived". + installPlugin = false; + String newClusterManagerNode = internalCluster().startClusterManagerOnlyNode(); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(oldClusterManagerNode)); + internalCluster().restartNode(newClusterManagerNode); + + // Verify that the archived settings exist.
+ assertTrue( + client().admin().indices().prepareGetSettings("test").get().getIndexToSettings().get("test").hasValue("archived.index.dummy") + ); + assertTrue( + client().admin().indices().prepareGetSettings("test").get().getIndexToSettings().get("test").hasValue("archived.index.dummy2") + ); + + // Archived setting update should fail on open index. + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> client().admin() + .indices() + .prepareUpdateSettings("test") + .setSettings(Settings.builder().putNull("archived.index.dummy")) + .execute() + .actionGet() + ); + assertThat( + exception.getMessage(), + startsWith("Can't update non dynamic settings [[archived.index.dummy]] for open indices [[test") + ); + + // close the index. + client().admin().indices().prepareClose("test").get(); + + // Remove archived.index.dummy explicitly. + assertTrue( + client().admin() + .indices() + .prepareUpdateSettings("test") + .setSettings(Settings.builder().putNull("archived.index.dummy")) + .execute() + .actionGet() + .isAcknowledged() + ); + + // Remove archived.index.dummy2 using wildcard. + assertTrue( + client().admin() + .indices() + .prepareUpdateSettings("test") + .setSettings(Settings.builder().putNull("archived.*")) + .execute() + .actionGet() + .isAcknowledged() + ); + + // Verify that archived settings are cleaned up successfully. + assertFalse( + client().admin().indices().prepareGetSettings("test").get().getIndexToSettings().get("test").hasValue("archived.index.dummy") + ); + assertFalse( + client().admin().indices().prepareGetSettings("test").get().getIndexToSettings().get("test").hasValue("archived.index.dummy2") + ); + } + + @Override + protected Collection> nodePlugins() { + return installPlugin ? Arrays.asList(DummySettingPlugin.class) : Collections.emptyList(); + } + + public static class DummySettingPlugin extends Plugin { + public static final Setting DUMMY_SETTING = Setting.simpleString( + "index.dummy", + Setting.Property.IndexScope, + Setting.Property.Dynamic + ); + public static final Setting DUMMY_SETTING2 = Setting.simpleString( + "index.dummy2", + Setting.Property.IndexScope, + Setting.Property.Dynamic + ); + + @Override + public List> getSettings() { + return Arrays.asList(DUMMY_SETTING, DUMMY_SETTING2); + } + } +} diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java index d1e9642596cea..524980565fe39 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java @@ -73,6 +73,7 @@ import java.util.Set; import static org.opensearch.action.support.ContextPreservingActionListener.wrapPreservingContext; +import static org.opensearch.common.settings.AbstractScopedSettings.ARCHIVED_SETTINGS_PREFIX; import static org.opensearch.index.IndexSettings.same; /** @@ -135,12 +136,16 @@ public void updateSettings( indexScopedSettings.validate( normalizedSettings.filter(s -> Regex.isSimpleMatchPattern(s) == false), // don't validate wildcards false, // don't validate dependencies here we check it below never allow to change the number of shards + false, + true, // Ignore archived setting. 
true ); // validate internal or private index settings for (String key : normalizedSettings.keySet()) { Setting setting = indexScopedSettings.get(key); boolean isWildcard = setting == null && Regex.isSimpleMatchPattern(key); + boolean isArchived = key.startsWith(ARCHIVED_SETTINGS_PREFIX); assert setting != null // we already validated the normalized settings + || isArchived || (isWildcard && normalizedSettings.hasValue(key) == false) : "unknown setting: " + key + " isWildcard: " @@ -148,7 +153,8 @@ public void updateSettings( + " hasValue: " + normalizedSettings.hasValue(key); settingsForClosedIndices.copy(key, normalizedSettings); - if (isWildcard || setting.isDynamic()) { + // Only allow dynamic settings and wildcards for open indices. Skip archived settings. + if (isArchived == false && (isWildcard || setting.isDynamic())) { settingsForOpenIndices.copy(key, normalizedSettings); } else { skippedSettings.add(key); @@ -308,6 +314,8 @@ public ClusterState execute(ClusterState currentState) { Settings finalSettings = indexSettings.build(); indexScopedSettings.validate( finalSettings.filter(k -> indexScopedSettings.isPrivateSetting(k) == false), + true, + false, true ); metadataBuilder.put(IndexMetadata.builder(indexMetadata).settings(finalSettings)); diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java index f14db4354f196..68d02151b50f5 100644 --- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java @@ -76,6 +76,8 @@ public final class IndexScopedSettings extends AbstractScopedSettings { public static final Predicate INDEX_SETTINGS_KEY_PREDICATE = (s) -> s.startsWith(IndexMetadata.INDEX_SETTING_PREFIX); + public static final Predicate ARCHIVED_SETTINGS_KEY_PREDICATE = (s) -> s.startsWith(ARCHIVED_SETTINGS_PREFIX); + public static final Set> BUILT_IN_INDEX_SETTINGS = Collections.unmodifiableSet( new HashSet<>( Arrays.asList( diff --git a/server/src/main/java/org/opensearch/common/settings/Settings.java b/server/src/main/java/org/opensearch/common/settings/Settings.java index 8f3bf1fd66b81..ae10f38943e73 100644 --- a/server/src/main/java/org/opensearch/common/settings/Settings.java +++ b/server/src/main/java/org/opensearch/common/settings/Settings.java @@ -88,6 +88,7 @@ import java.util.stream.Collectors; import java.util.stream.Stream; +import static org.opensearch.common.settings.AbstractScopedSettings.ARCHIVED_SETTINGS_PREFIX; import static org.opensearch.common.unit.TimeValue.parseTimeValue; import static org.opensearch.core.common.unit.ByteSizeValue.parseBytesSizeValue; @@ -1217,7 +1218,7 @@ public boolean shouldRemoveMissingPlaceholder(String placeholderName) { } /** - * Checks that all settings in the builder start with the specified prefix. + * Checks that all settings(except archived settings and wildcards) in the builder start with the specified prefix. * * If a setting doesn't start with the prefix, the builder appends the prefix to such setting. 
*/ @@ -1227,7 +1228,7 @@ public Builder normalizePrefix(String prefix) { while (iterator.hasNext()) { Map.Entry entry = iterator.next(); String key = entry.getKey(); - if (key.startsWith(prefix) == false && key.endsWith("*") == false) { + if (key.startsWith(prefix) == false && key.endsWith("*") == false && key.startsWith(ARCHIVED_SETTINGS_PREFIX) == false) { replacements.put(prefix + key, entry.getValue()); iterator.remove(); } diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index 9ceb03974166f..ec719c99e163f 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -1147,7 +1147,9 @@ public synchronized boolean updateIndexMetadata(IndexMetadata indexMetadata) { */ public static boolean same(final Settings left, final Settings right) { return left.filter(IndexScopedSettings.INDEX_SETTINGS_KEY_PREDICATE) - .equals(right.filter(IndexScopedSettings.INDEX_SETTINGS_KEY_PREDICATE)); + .equals(right.filter(IndexScopedSettings.INDEX_SETTINGS_KEY_PREDICATE)) + && left.filter(IndexScopedSettings.ARCHIVED_SETTINGS_KEY_PREDICATE) + .equals(right.filter(IndexScopedSettings.ARCHIVED_SETTINGS_KEY_PREDICATE)); } /** diff --git a/server/src/test/java/org/opensearch/common/settings/SettingsTests.java b/server/src/test/java/org/opensearch/common/settings/SettingsTests.java index 99dc171f5b1de..af4efabb341ee 100644 --- a/server/src/test/java/org/opensearch/common/settings/SettingsTests.java +++ b/server/src/test/java/org/opensearch/common/settings/SettingsTests.java @@ -301,6 +301,20 @@ public void testPrefixNormalization() { assertThat(settings.get("foo.test"), equalTo("test")); } + public void testPrefixNormalizationArchived() { + Settings settings = Settings.builder().put("archived.foo.bar", "baz").normalizePrefix("foo.").build(); + + assertThat(settings.size(), equalTo(1)); + assertThat(settings.get("foo.archived.foo.bar"), nullValue()); + assertThat(settings.get("archived.foo.bar"), equalTo("baz")); + + settings = Settings.builder().put("archived.foo.*", "baz").normalizePrefix("foo.").build(); + + assertThat(settings.size(), equalTo(1)); + assertThat(settings.get("foo.archived.foo.*"), nullValue()); + assertThat(settings.get("archived.foo.*"), equalTo("baz")); + } + public void testFilteredMap() { Settings.Builder builder = Settings.builder(); builder.put("a", "a1"); From dc4ef8f4ef466c3509cc92a2fd52672d0e132008 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 22 Aug 2023 09:30:14 -0700 Subject: [PATCH 07/30] Bump com.google.api:gax from 2.27.0 to 2.32.0 in /plugins/repository-gcs (#9300) * Bump com.google.api:gax from 2.27.0 to 2.32.0 in /plugins/repository-gcs updated-dependencies: - dependency-name: com.google.api:gax - dependency-type: direct:production - update-type: version-update:semver-minor ... 
* add 'com.google.auth.oauth2.GdchCredentials' into ignore missing classes list --------- Signed-off-by: dependabot[bot] Signed-off-by: Tianli Feng --- CHANGELOG.md | 1 + plugins/repository-gcs/build.gradle | 3 ++- plugins/repository-gcs/licenses/gax-2.27.0.jar.sha1 | 1 - plugins/repository-gcs/licenses/gax-2.32.0.jar.sha1 | 1 + 4 files changed, 4 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-gcs/licenses/gax-2.27.0.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/gax-2.32.0.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index cf70236502702..c64865a01eb37 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -119,6 +119,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `aws-actions/configure-aws-credentials` from 1 to 2 ([#9302](https://github.com/opensearch-project/OpenSearch/pull/9302)) - Bump `com.github.luben:zstd-jni` from 1.5.5-3 to 1.5.5-5 ([#9431](https://github.com/opensearch-project/OpenSearch/pull/9431) - Bump `actions/setup-java` from 2 to 3 ([#9457](https://github.com/opensearch-project/OpenSearch/pull/9457)) +- Bump `com.google.api:gax` from 2.27.0 to 2.32.0 ([#9300](https://github.com/opensearch-project/OpenSearch/pull/9300)) ### Changed - Default to mmapfs within hybridfs ([#8508](https://github.com/opensearch-project/OpenSearch/pull/8508)) diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index 1f4104a929116..6d4e56cb81a11 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -53,7 +53,7 @@ versions << [ dependencies { api 'com.google.api:api-common:1.8.1' - api 'com.google.api:gax:2.27.0' + api 'com.google.api:gax:2.32.0' api 'com.google.api:gax-httpjson:0.103.1' api 'com.google.apis:google-api-services-storage:v1-rev20230617-2.0.0' @@ -148,6 +148,7 @@ thirdPartyAudit { 'com.google.appengine.api.urlfetch.HTTPResponse', 'com.google.appengine.api.urlfetch.URLFetchService', 'com.google.appengine.api.urlfetch.URLFetchServiceFactory', + 'com.google.auth.oauth2.GdchCredentials', 'com.google.protobuf.util.JsonFormat', 'com.google.protobuf.util.JsonFormat$Parser', 'com.google.protobuf.util.JsonFormat$Printer', diff --git a/plugins/repository-gcs/licenses/gax-2.27.0.jar.sha1 b/plugins/repository-gcs/licenses/gax-2.27.0.jar.sha1 deleted file mode 100644 index 1813a3aa94404..0000000000000 --- a/plugins/repository-gcs/licenses/gax-2.27.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -04a27757c9240da71f896be39f47aaa6e23ef989 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/gax-2.32.0.jar.sha1 b/plugins/repository-gcs/licenses/gax-2.32.0.jar.sha1 new file mode 100644 index 0000000000000..9cae74e1c3673 --- /dev/null +++ b/plugins/repository-gcs/licenses/gax-2.32.0.jar.sha1 @@ -0,0 +1 @@ +522bf3c2a738847b9719eac8ce572be0f84da40a \ No newline at end of file From 60d272bc2fe875d7edd1e65ac87ac31999505ee1 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Tue, 22 Aug 2023 14:42:23 -0400 Subject: [PATCH 08/30] Github action for Gradle precommit failing on MacOS (#9486) Signed-off-by: Andriy Redko --- .github/workflows/precommit.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/precommit.yml b/.github/workflows/precommit.yml index fdb1d7d4262e4..0372d57dda91f 100644 --- a/.github/workflows/precommit.yml +++ b/.github/workflows/precommit.yml @@ -22,6 +22,10 @@ jobs: - name: Setup docker (missing on MacOS) if: runner.os == 'macos' run: | + # Workaround for https://github.com/actions/runner-images/issues/8104 
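+          # Swap the current qemu for a formula pinned to a known-good revision before installing docker and colima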
+ brew remove --ignore-dependencies qemu + curl -o ./qemu.rb https://raw.githubusercontent.com/Homebrew/homebrew-core/f88e30b3a23ef3735580f9b05535ce5a0a03c9e3/Formula/qemu.rb + brew install ./qemu.rb brew install docker colima start sudo ln -sf $HOME/.colima/default/docker.sock /var/run/docker.sock From 5d3633cbecfaa1cdc4fcf5efd5ed1ec603de5081 Mon Sep 17 00:00:00 2001 From: Louis Chu Date: Tue, 22 Aug 2023 22:46:30 -0400 Subject: [PATCH 09/30] [Feature] Expose term frequency in Painless script score context (#9081) Add the following functions in Painless script score context: * termfreq * tf * totaltermfreq * sumtotaltermfreq Each of these maps to a Lucene value source. Signed-off-by: Louis Chu --- CHANGELOG.md | 3 +- .../expression/ExpressionScoreScript.java | 2 +- .../expression/ExpressionScriptEngine.java | 3 +- .../action/PainlessExecuteAction.java | 6 +- .../painless/spi/org.opensearch.score.txt | 4 + .../120_script_score_term_frequency.yml | 95 +++++++++++++++++++ .../expertscript/ExpertScriptPlugin.java | 14 ++- .../functionscore/ExplainableScriptIT.java | 9 +- .../ScriptScoreFunctionBuilder.java | 2 +- .../ScriptScoreQueryBuilder.java | 2 +- .../functionscore/TermFrequencyFunction.java | 22 +++++ .../TermFrequencyFunctionFactory.java | 95 +++++++++++++++++++ .../org/opensearch/script/ScoreScript.java | 16 +++- .../opensearch/script/ScoreScriptUtils.java | 88 +++++++++++++++++ .../lookup/LeafTermFrequencyLookup.java | 62 ++++++++++++ .../search/query/ScriptScoreQueryTests.java | 3 +- .../opensearch/script/MockScriptEngine.java | 5 +- 17 files changed, 411 insertions(+), 20 deletions(-) create mode 100644 modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/120_script_score_term_frequency.yml create mode 100644 server/src/main/java/org/opensearch/index/query/functionscore/TermFrequencyFunction.java create mode 100644 server/src/main/java/org/opensearch/index/query/functionscore/TermFrequencyFunctionFactory.java create mode 100644 server/src/main/java/org/opensearch/search/lookup/LeafTermFrequencyLookup.java diff --git a/CHANGELOG.md b/CHANGELOG.md index c64865a01eb37..199461fd93cd7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -88,6 +88,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Make SearchTemplateRequest implement IndicesRequest.Replaceable ([#9122]()https://github.com/opensearch-project/OpenSearch/pull/9122) - [BWC and API enforcement] Define the initial set of annotations, their meaning and relations between them ([#9223](https://github.com/opensearch-project/OpenSearch/pull/9223)) - [Segment Replication] Support realtime reads for GET requests ([#9212](https://github.com/opensearch-project/OpenSearch/pull/9212)) +- [Feature] Expose term frequency in Painless script score context ([#9081](https://github.com/opensearch-project/OpenSearch/pull/9081)) ### Dependencies - Bump `org.apache.logging.log4j:log4j-core` from 2.17.1 to 2.20.0 ([#8307](https://github.com/opensearch-project/OpenSearch/pull/8307)) @@ -164,4 +165,4 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Security [Unreleased 3.0]: https://github.com/opensearch-project/OpenSearch/compare/2.x...HEAD -[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.10...2.x \ No newline at end of file +[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.10...2.x diff --git a/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionScoreScript.java 
b/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionScoreScript.java index 6be299146a181..3932559f7685c 100644 --- a/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionScoreScript.java +++ b/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionScoreScript.java @@ -66,7 +66,7 @@ public boolean needs_score() { @Override public ScoreScript newInstance(final LeafReaderContext leaf) throws IOException { - return new ScoreScript(null, null, null) { + return new ScoreScript(null, null, null, null) { // Fake the scorer until setScorer is called. DoubleValues values = source.getValues(leaf, new DoubleValues() { @Override diff --git a/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionScriptEngine.java b/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionScriptEngine.java index 1c3dc69359952..035d2402857e0 100644 --- a/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionScriptEngine.java +++ b/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionScriptEngine.java @@ -37,6 +37,7 @@ import org.apache.lucene.expressions.js.JavascriptCompiler; import org.apache.lucene.expressions.js.VariableContext; import org.apache.lucene.search.DoubleValuesSource; +import org.apache.lucene.search.IndexSearcher; import org.opensearch.SpecialPermission; import org.opensearch.common.Nullable; import org.opensearch.index.fielddata.IndexFieldData; @@ -110,7 +111,7 @@ public FilterScript.LeafFactory newFactory(Map params, SearchLoo contexts.put(ScoreScript.CONTEXT, (Expression expr) -> new ScoreScript.Factory() { @Override - public ScoreScript.LeafFactory newFactory(Map params, SearchLookup lookup) { + public ScoreScript.LeafFactory newFactory(Map params, SearchLookup lookup, IndexSearcher indexSearcher) { return newScoreScript(expr, lookup, params); } diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/action/PainlessExecuteAction.java b/modules/lang-painless/src/main/java/org/opensearch/painless/action/PainlessExecuteAction.java index f5193b393ee88..67b298eee7973 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/action/PainlessExecuteAction.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/action/PainlessExecuteAction.java @@ -558,7 +558,11 @@ static Response innerShardOperation(Request request, ScriptService scriptService } else if (scriptContext == ScoreScript.CONTEXT) { return prepareRamIndex(request, (context, leafReaderContext) -> { ScoreScript.Factory factory = scriptService.compile(request.script, ScoreScript.CONTEXT); - ScoreScript.LeafFactory leafFactory = factory.newFactory(request.getScript().getParams(), context.lookup()); + ScoreScript.LeafFactory leafFactory = factory.newFactory( + request.getScript().getParams(), + context.lookup(), + context.searcher() + ); ScoreScript scoreScript = leafFactory.newInstance(leafReaderContext); scoreScript.setDocument(0); diff --git a/modules/lang-painless/src/main/resources/org/opensearch/painless/spi/org.opensearch.score.txt b/modules/lang-painless/src/main/resources/org/opensearch/painless/spi/org.opensearch.score.txt index 61d53608a30c8..5533f0bc55522 100644 --- a/modules/lang-painless/src/main/resources/org/opensearch/painless/spi/org.opensearch.score.txt +++ b/modules/lang-painless/src/main/resources/org/opensearch/painless/spi/org.opensearch.score.txt @@ -23,6 +23,10 @@ class 
org.opensearch.script.ScoreScript @no_import { } static_import { + int termFreq(org.opensearch.script.ScoreScript, String, String) bound_to org.opensearch.script.ScoreScriptUtils$TermFreq + float tf(org.opensearch.script.ScoreScript, String, String) bound_to org.opensearch.script.ScoreScriptUtils$TF + long totalTermFreq(org.opensearch.script.ScoreScript, String, String) bound_to org.opensearch.script.ScoreScriptUtils$TotalTermFreq + long sumTotalTermFreq(org.opensearch.script.ScoreScript, String) bound_to org.opensearch.script.ScoreScriptUtils$SumTotalTermFreq double saturation(double, double) from_class org.opensearch.script.ScoreScriptUtils double sigmoid(double, double, double) from_class org.opensearch.script.ScoreScriptUtils double randomScore(org.opensearch.script.ScoreScript, int, String) bound_to org.opensearch.script.ScoreScriptUtils$RandomScoreField diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/120_script_score_term_frequency.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/120_script_score_term_frequency.yml new file mode 100644 index 0000000000000..b3ff66251938d --- /dev/null +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/120_script_score_term_frequency.yml @@ -0,0 +1,95 @@ +--- +setup: + - skip: + version: " - 2.9.99" + reason: "termFreq functions for script_score was introduced in 2.10.0" + - do: + indices.create: + index: test + body: + settings: + number_of_shards: 1 + mappings: + properties: + f1: + type: keyword + f2: + type: text + - do: + bulk: + refresh: true + body: + - '{"index": {"_index": "test", "_id": "doc1"}}' + - '{"f1": "v0", "f2": "v1"}' + - '{"index": {"_index": "test", "_id": "doc2"}}' + - '{"f2": "v2"}' + +--- +"Script score function using the termFreq function": + - do: + search: + index: test + rest_total_hits_as_int: true + body: + query: + function_score: + query: + match_all: {} + script_score: + script: + source: "termFreq(params.field, params.term)" + params: + field: "f1" + term: "v0" + - match: { hits.total: 2 } + - match: { hits.hits.0._id: "doc1" } + - match: { hits.hits.1._id: "doc2" } + - match: { hits.hits.0._score: 1.0 } + - match: { hits.hits.1._score: 0.0 } + +--- +"Script score function using the totalTermFreq function": + - do: + search: + index: test + rest_total_hits_as_int: true + body: + query: + function_score: + query: + match_all: {} + script_score: + script: + source: "if (doc[params.field].size() == 0) return params.default_value; else { return totalTermFreq(params.field, params.term); }" + params: + default_value: 0.5 + field: "f1" + term: "v0" + - match: { hits.total: 2 } + - match: { hits.hits.0._id: "doc1" } + - match: { hits.hits.1._id: "doc2" } + - match: { hits.hits.0._score: 1.0 } + - match: { hits.hits.1._score: 0.5 } + +--- +"Script score function using the sumTotalTermFreq function": + - do: + search: + index: test + rest_total_hits_as_int: true + body: + query: + function_score: + query: + match_all: {} + script_score: + script: + source: "if (doc[params.field].size() == 0) return params.default_value; else { return sumTotalTermFreq(params.field); }" + params: + default_value: 0.5 + field: "f1" + - match: { hits.total: 2 } + - match: { hits.hits.0._id: "doc1" } + - match: { hits.hits.1._id: "doc2" } + - match: { hits.hits.0._score: 1.0 } + - match: { hits.hits.1._score: 0.5 } diff --git a/plugins/examples/script-expert-scoring/src/main/java/org/opensearch/example/expertscript/ExpertScriptPlugin.java 
b/plugins/examples/script-expert-scoring/src/main/java/org/opensearch/example/expertscript/ExpertScriptPlugin.java index e7615d9ad7204..07c2d4d6435d7 100644 --- a/plugins/examples/script-expert-scoring/src/main/java/org/opensearch/example/expertscript/ExpertScriptPlugin.java +++ b/plugins/examples/script-expert-scoring/src/main/java/org/opensearch/example/expertscript/ExpertScriptPlugin.java @@ -35,6 +35,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.PostingsEnum; import org.apache.lucene.index.Term; +import org.apache.lucene.search.IndexSearcher; import org.opensearch.common.settings.Settings; import org.opensearch.plugins.Plugin; import org.opensearch.plugins.ScriptPlugin; @@ -120,20 +121,22 @@ public boolean isResultDeterministic() { @Override public LeafFactory newFactory( Map params, - SearchLookup lookup + SearchLookup lookup, + IndexSearcher indexSearcher ) { - return new PureDfLeafFactory(params, lookup); + return new PureDfLeafFactory(params, lookup, indexSearcher); } } private static class PureDfLeafFactory implements LeafFactory { private final Map params; private final SearchLookup lookup; + private final IndexSearcher indexSearcher; private final String field; private final String term; private PureDfLeafFactory( - Map params, SearchLookup lookup) { + Map params, SearchLookup lookup, IndexSearcher indexSearcher) { if (params.containsKey("field") == false) { throw new IllegalArgumentException( "Missing parameter [field]"); @@ -144,6 +147,7 @@ private PureDfLeafFactory( } this.params = params; this.lookup = lookup; + this.indexSearcher = indexSearcher; field = params.get("field").toString(); term = params.get("term").toString(); } @@ -163,7 +167,7 @@ public ScoreScript newInstance(LeafReaderContext context) * the field and/or term don't exist in this segment, * so always return 0 */ - return new ScoreScript(params, lookup, context) { + return new ScoreScript(params, lookup, indexSearcher, context) { @Override public double execute( ExplanationHolder explanation @@ -172,7 +176,7 @@ public double execute( } }; } - return new ScoreScript(params, lookup, context) { + return new ScoreScript(params, lookup, indexSearcher, context) { int currentDocid = -1; @Override public void setDocument(int docid) { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/ExplainableScriptIT.java b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/ExplainableScriptIT.java index 3651a7354e5de..f329677a94340 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/ExplainableScriptIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/ExplainableScriptIT.java @@ -34,6 +34,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Explanation; +import org.apache.lucene.search.IndexSearcher; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.search.SearchType; @@ -93,7 +94,7 @@ public String getType() { public T compile(String scriptName, String scriptSource, ScriptContext context, Map params) { assert scriptSource.equals("explainable_script"); assert context == ScoreScript.CONTEXT; - ScoreScript.Factory factory = (params1, lookup) -> new ScoreScript.LeafFactory() { + ScoreScript.Factory factory = (params1, lookup, indexSearcher) -> new ScoreScript.LeafFactory() { @Override public boolean needs_score() { return false; @@ -101,7 +102,7 @@ public 
boolean needs_score() { @Override public ScoreScript newInstance(LeafReaderContext ctx) throws IOException { - return new MyScript(params1, lookup, ctx); + return new MyScript(params1, lookup, indexSearcher, ctx); } }; return context.factoryClazz.cast(factory); @@ -117,8 +118,8 @@ public Set> getSupportedContexts() { static class MyScript extends ScoreScript implements ExplainableScoreScript { - MyScript(Map params, SearchLookup lookup, LeafReaderContext leafContext) { - super(params, lookup, leafContext); + MyScript(Map params, SearchLookup lookup, IndexSearcher indexSearcher, LeafReaderContext leafContext) { + super(params, lookup, indexSearcher, leafContext); } @Override diff --git a/server/src/main/java/org/opensearch/index/query/functionscore/ScriptScoreFunctionBuilder.java b/server/src/main/java/org/opensearch/index/query/functionscore/ScriptScoreFunctionBuilder.java index e241211911502..3dadaeada2e60 100644 --- a/server/src/main/java/org/opensearch/index/query/functionscore/ScriptScoreFunctionBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/functionscore/ScriptScoreFunctionBuilder.java @@ -114,7 +114,7 @@ protected int doHashCode() { protected ScoreFunction doToFunction(QueryShardContext context) { try { ScoreScript.Factory factory = context.compile(script, ScoreScript.CONTEXT); - ScoreScript.LeafFactory searchScript = factory.newFactory(script.getParams(), context.lookup()); + ScoreScript.LeafFactory searchScript = factory.newFactory(script.getParams(), context.lookup(), context.searcher()); return new ScriptScoreFunction( script, searchScript, diff --git a/server/src/main/java/org/opensearch/index/query/functionscore/ScriptScoreQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/functionscore/ScriptScoreQueryBuilder.java index 51c4362b6e257..e302ebcee4ba7 100644 --- a/server/src/main/java/org/opensearch/index/query/functionscore/ScriptScoreQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/functionscore/ScriptScoreQueryBuilder.java @@ -187,7 +187,7 @@ protected Query doToQuery(QueryShardContext context) throws IOException { ); } ScoreScript.Factory factory = context.compile(script, ScoreScript.CONTEXT); - ScoreScript.LeafFactory scoreScriptFactory = factory.newFactory(script.getParams(), context.lookup()); + ScoreScript.LeafFactory scoreScriptFactory = factory.newFactory(script.getParams(), context.lookup(), context.searcher()); final QueryBuilder queryBuilder = this.query; Query query = queryBuilder.toQuery(context); return new ScriptScoreQuery( diff --git a/server/src/main/java/org/opensearch/index/query/functionscore/TermFrequencyFunction.java b/server/src/main/java/org/opensearch/index/query/functionscore/TermFrequencyFunction.java new file mode 100644 index 0000000000000..95fbecc53f4ae --- /dev/null +++ b/server/src/main/java/org/opensearch/index/query/functionscore/TermFrequencyFunction.java @@ -0,0 +1,22 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.query.functionscore; + +import java.io.IOException; + +/** + * An interface representing a term frequency function used to compute document scores + * based on specific term frequency calculations. Implementations of this interface should + * provide a way to execute the term frequency function for a given document ID. 
+ * + * @opensearch.internal + */ +public interface TermFrequencyFunction { + Object execute(int docId) throws IOException; +} diff --git a/server/src/main/java/org/opensearch/index/query/functionscore/TermFrequencyFunctionFactory.java b/server/src/main/java/org/opensearch/index/query/functionscore/TermFrequencyFunctionFactory.java new file mode 100644 index 0000000000000..4edcd34889abd --- /dev/null +++ b/server/src/main/java/org/opensearch/index/query/functionscore/TermFrequencyFunctionFactory.java @@ -0,0 +1,95 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.query.functionscore; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.queries.function.FunctionValues; +import org.apache.lucene.queries.function.valuesource.SumTotalTermFreqValueSource; +import org.apache.lucene.queries.function.valuesource.TFValueSource; +import org.apache.lucene.queries.function.valuesource.TermFreqValueSource; +import org.apache.lucene.queries.function.valuesource.TotalTermFreqValueSource; +import org.apache.lucene.search.IndexSearcher; +import org.opensearch.common.lucene.BytesRefs; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +/** + * A factory class for creating instances of {@link TermFrequencyFunction}. + * This class provides methods for creating different term frequency functions based on + * the specified function name, field, and term. Each term frequency function is designed + * to compute document scores based on specific term frequency calculations. + * + * @opensearch.internal + */ +public class TermFrequencyFunctionFactory { + public static TermFrequencyFunction createFunction( + TermFrequencyFunctionName functionName, + String field, + String term, + LeafReaderContext readerContext, + IndexSearcher indexSearcher + ) throws IOException { + switch (functionName) { + case TERM_FREQ: + TermFreqValueSource termFreqValueSource = new TermFreqValueSource(field, term, field, BytesRefs.toBytesRef(term)); + FunctionValues functionValues = termFreqValueSource.getValues(null, readerContext); + return docId -> functionValues.intVal(docId); + case TF: + TFValueSource tfValueSource = new TFValueSource(field, term, field, BytesRefs.toBytesRef(term)); + Map tfContext = new HashMap<>() { + { + put("searcher", indexSearcher); + } + }; + functionValues = tfValueSource.getValues(tfContext, readerContext); + return docId -> functionValues.floatVal(docId); + case TOTAL_TERM_FREQ: + TotalTermFreqValueSource totalTermFreqValueSource = new TotalTermFreqValueSource( + field, + term, + field, + BytesRefs.toBytesRef(term) + ); + Map ttfContext = new HashMap<>(); + totalTermFreqValueSource.createWeight(ttfContext, indexSearcher); + functionValues = totalTermFreqValueSource.getValues(ttfContext, readerContext); + return docId -> functionValues.longVal(docId); + case SUM_TOTAL_TERM_FREQ: + SumTotalTermFreqValueSource sumTotalTermFreqValueSource = new SumTotalTermFreqValueSource(field); + Map sttfContext = new HashMap<>(); + sumTotalTermFreqValueSource.createWeight(sttfContext, indexSearcher); + functionValues = sumTotalTermFreqValueSource.getValues(sttfContext, readerContext); + return docId -> functionValues.longVal(docId); + default: + throw new IllegalArgumentException("Unsupported function: " + functionName); + } + } + + /** + * An enumeration representing the 
names of supported term frequency functions. + */ + public enum TermFrequencyFunctionName { + TERM_FREQ("termFreq"), + TF("tf"), + TOTAL_TERM_FREQ("totalTermFreq"), + SUM_TOTAL_TERM_FREQ("sumTotalTermFreq"); + + private final String termFrequencyFunctionName; + + TermFrequencyFunctionName(String termFrequencyFunctionName) { + this.termFrequencyFunctionName = termFrequencyFunctionName; + } + + public String getTermFrequencyFunctionName() { + return termFrequencyFunctionName; + } + } +} diff --git a/server/src/main/java/org/opensearch/script/ScoreScript.java b/server/src/main/java/org/opensearch/script/ScoreScript.java index 5c6553ffc2a28..70de636a655f2 100644 --- a/server/src/main/java/org/opensearch/script/ScoreScript.java +++ b/server/src/main/java/org/opensearch/script/ScoreScript.java @@ -33,11 +33,14 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Explanation; +import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Scorable; import org.opensearch.Version; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.index.fielddata.ScriptDocValues; +import org.opensearch.index.query.functionscore.TermFrequencyFunctionFactory.TermFrequencyFunctionName; import org.opensearch.search.lookup.LeafSearchLookup; +import org.opensearch.search.lookup.LeafTermFrequencyLookup; import org.opensearch.search.lookup.SearchLookup; import org.opensearch.search.lookup.SourceLookup; @@ -107,6 +110,9 @@ public Explanation get(double score, Explanation subQueryExplanation) { /** A leaf lookup for the bound segment this script will operate on. */ private final LeafSearchLookup leafLookup; + /** A leaf term frequency lookup for the bound segment this script will operate on. */ + private final LeafTermFrequencyLookup leafTermFrequencyLookup; + private DoubleSupplier scoreSupplier = () -> 0.0; private final int docBase; @@ -115,16 +121,18 @@ public Explanation get(double score, Explanation subQueryExplanation) { private String indexName = null; private Version indexVersion = null; - public ScoreScript(Map params, SearchLookup lookup, LeafReaderContext leafContext) { + public ScoreScript(Map params, SearchLookup lookup, IndexSearcher indexSearcher, LeafReaderContext leafContext) { // null check needed b/c of expression engine subclass if (lookup == null) { assert params == null; assert leafContext == null; this.params = null; this.leafLookup = null; + this.leafTermFrequencyLookup = null; this.docBase = 0; } else { this.leafLookup = lookup.getLeafSearchLookup(leafContext); + this.leafTermFrequencyLookup = new LeafTermFrequencyLookup(indexSearcher, leafLookup); params = new HashMap<>(params); params.putAll(leafLookup.asMap()); this.params = new DynamicMap(params, PARAMS_FUNCTIONS); @@ -144,6 +152,10 @@ public Map> getDoc() { return leafLookup.doc(); } + public Object getTermFrequency(TermFrequencyFunctionName functionName, String field, String val) throws IOException { + return leafTermFrequencyLookup.getTermFrequency(functionName, field, val, docId); + } + /** Set the current document to run the script on next. 
*/ public void setDocument(int docid) { this.docId = docid; @@ -268,7 +280,7 @@ public interface LeafFactory { */ public interface Factory extends ScriptFactory { - ScoreScript.LeafFactory newFactory(Map params, SearchLookup lookup); + ScoreScript.LeafFactory newFactory(Map params, SearchLookup lookup, IndexSearcher indexSearcher); } diff --git a/server/src/main/java/org/opensearch/script/ScoreScriptUtils.java b/server/src/main/java/org/opensearch/script/ScoreScriptUtils.java index 76d0a8bb44da0..0767c29fa1b31 100644 --- a/server/src/main/java/org/opensearch/script/ScoreScriptUtils.java +++ b/server/src/main/java/org/opensearch/script/ScoreScriptUtils.java @@ -48,6 +48,10 @@ import java.time.ZonedDateTime; import static org.opensearch.common.util.BitMixer.mix32; +import static org.opensearch.index.query.functionscore.TermFrequencyFunctionFactory.TermFrequencyFunctionName.SUM_TOTAL_TERM_FREQ; +import static org.opensearch.index.query.functionscore.TermFrequencyFunctionFactory.TermFrequencyFunctionName.TERM_FREQ; +import static org.opensearch.index.query.functionscore.TermFrequencyFunctionFactory.TermFrequencyFunctionName.TF; +import static org.opensearch.index.query.functionscore.TermFrequencyFunctionFactory.TermFrequencyFunctionName.TOTAL_TERM_FREQ; /** * Utilities for scoring scripts @@ -70,6 +74,90 @@ public static double sigmoid(double value, double k, double a) { return Math.pow(value, a) / (Math.pow(k, a) + Math.pow(value, a)); } + /** + * Retrieves the term frequency within a field for a specific term. + * + * @opensearch.internal + */ + public static final class TermFreq { + private final ScoreScript scoreScript; + + public TermFreq(ScoreScript scoreScript) { + this.scoreScript = scoreScript; + } + + public int termFreq(String field, String term) { + try { + return (int) scoreScript.getTermFrequency(TERM_FREQ, field, term); + } catch (Exception e) { + throw ExceptionsHelper.convertToOpenSearchException(e); + } + } + } + + /** + * Calculates the term frequency-inverse document frequency (tf-idf) for a specific term within a field. + * + * @opensearch.internal + */ + public static final class TF { + private final ScoreScript scoreScript; + + public TF(ScoreScript scoreScript) { + this.scoreScript = scoreScript; + } + + public float tf(String field, String term) { + try { + return (float) scoreScript.getTermFrequency(TF, field, term); + } catch (Exception e) { + throw ExceptionsHelper.convertToOpenSearchException(e); + } + } + } + + /** + * Retrieves the total term frequency within a field for a specific term. + * + * @opensearch.internal + */ + public static final class TotalTermFreq { + private final ScoreScript scoreScript; + + public TotalTermFreq(ScoreScript scoreScript) { + this.scoreScript = scoreScript; + } + + public long totalTermFreq(String field, String term) { + try { + return (long) scoreScript.getTermFrequency(TOTAL_TERM_FREQ, field, term); + } catch (Exception e) { + throw ExceptionsHelper.convertToOpenSearchException(e); + } + } + } + + /** + * Retrieves the sum of total term frequencies within a field. 
+ * + * @opensearch.internal + */ + public static final class SumTotalTermFreq { + private final ScoreScript scoreScript; + + public SumTotalTermFreq(ScoreScript scoreScript) { + this.scoreScript = scoreScript; + } + + public long sumTotalTermFreq(String field) { + try { + return (long) scoreScript.getTermFrequency(SUM_TOTAL_TERM_FREQ, field, null); + } catch (Exception e) { + throw ExceptionsHelper.convertToOpenSearchException(e); + } + } + } + /** * random score based on the documents' values of the given field * diff --git a/server/src/main/java/org/opensearch/search/lookup/LeafTermFrequencyLookup.java b/server/src/main/java/org/opensearch/search/lookup/LeafTermFrequencyLookup.java new file mode 100644 index 0000000000000..d02313ada1db9 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/lookup/LeafTermFrequencyLookup.java @@ -0,0 +1,62 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.lookup; + +import org.apache.lucene.search.IndexSearcher; +import org.opensearch.index.query.functionscore.TermFrequencyFunction; +import org.opensearch.index.query.functionscore.TermFrequencyFunctionFactory; +import org.opensearch.index.query.functionscore.TermFrequencyFunctionFactory.TermFrequencyFunctionName; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; + +/** + * Looks up term frequency per-segment + * + * @opensearch.internal + */ +public class LeafTermFrequencyLookup { + + private final IndexSearcher indexSearcher; + private final LeafSearchLookup leafLookup; + private final Map termFreqCache; + + public LeafTermFrequencyLookup(IndexSearcher indexSearcher, LeafSearchLookup leafLookup) { + this.indexSearcher = indexSearcher; + this.leafLookup = leafLookup; + this.termFreqCache = new HashMap<>(); + } + + public Object getTermFrequency(TermFrequencyFunctionName functionName, String field, String val, int docId) throws IOException { + TermFrequencyFunction termFrequencyFunction = getOrCreateTermFrequencyFunction(functionName, field, val); + return termFrequencyFunction.execute(docId); + } + + private TermFrequencyFunction getOrCreateTermFrequencyFunction(TermFrequencyFunctionName functionName, String field, String val) + throws IOException { + String cacheKey = (val == null) + ? 
String.format(Locale.ROOT, "%s-%s", functionName, field) + : String.format(Locale.ROOT, "%s-%s-%s", functionName, field, val); + + if (!termFreqCache.containsKey(cacheKey)) { + TermFrequencyFunction termFrequencyFunction = TermFrequencyFunctionFactory.createFunction( + functionName, + field, + val, + leafLookup.ctx, + indexSearcher + ); + termFreqCache.put(cacheKey, termFrequencyFunction); + } + + return termFreqCache.get(cacheKey); + } +} diff --git a/server/src/test/java/org/opensearch/search/query/ScriptScoreQueryTests.java b/server/src/test/java/org/opensearch/search/query/ScriptScoreQueryTests.java index e1002e114822e..ca4b7dc49f6f0 100644 --- a/server/src/test/java/org/opensearch/search/query/ScriptScoreQueryTests.java +++ b/server/src/test/java/org/opensearch/search/query/ScriptScoreQueryTests.java @@ -184,6 +184,7 @@ private ScoreScript.LeafFactory newFactory( ) { SearchLookup lookup = mock(SearchLookup.class); LeafSearchLookup leafLookup = mock(LeafSearchLookup.class); + IndexSearcher indexSearcher = mock(IndexSearcher.class); when(lookup.getLeafSearchLookup(any())).thenReturn(leafLookup); return new ScoreScript.LeafFactory() { @Override @@ -193,7 +194,7 @@ public boolean needs_score() { @Override public ScoreScript newInstance(LeafReaderContext ctx) throws IOException { - return new ScoreScript(script.getParams(), lookup, leafReaderContext) { + return new ScoreScript(script.getParams(), lookup, indexSearcher, leafReaderContext) { @Override public double execute(ExplanationHolder explanation) { return function.apply(explanation); diff --git a/test/framework/src/main/java/org/opensearch/script/MockScriptEngine.java b/test/framework/src/main/java/org/opensearch/script/MockScriptEngine.java index 98912e53c9d6a..cb0614ddeb808 100644 --- a/test/framework/src/main/java/org/opensearch/script/MockScriptEngine.java +++ b/test/framework/src/main/java/org/opensearch/script/MockScriptEngine.java @@ -33,6 +33,7 @@ package org.opensearch.script; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Scorable; import org.opensearch.index.query.IntervalFilterScript; import org.opensearch.index.similarity.ScriptedSimilarity.Doc; @@ -624,7 +625,7 @@ public MockScoreScript(MockDeterministicScript script) { } @Override - public ScoreScript.LeafFactory newFactory(Map params, SearchLookup lookup) { + public ScoreScript.LeafFactory newFactory(Map params, SearchLookup lookup, IndexSearcher indexSearcher) { return new ScoreScript.LeafFactory() { @Override public boolean needs_score() { @@ -634,7 +635,7 @@ public boolean needs_score() { @Override public ScoreScript newInstance(LeafReaderContext ctx) throws IOException { Scorable[] scorerHolder = new Scorable[1]; - return new ScoreScript(params, lookup, ctx) { + return new ScoreScript(params, lookup, indexSearcher, ctx) { @Override public double execute(ExplanationHolder explanation) { Map vars = new HashMap<>(getParams()); From 9272aa21493eb16734b43d461ebc1f111087d60b Mon Sep 17 00:00:00 2001 From: Sachin Kale Date: Wed, 23 Aug 2023 13:47:26 +0000 Subject: [PATCH 10/30] [Remote Store] Fix tests when we restore index without any refresh (#9480) Signed-off-by: Sachin Kale --- .../RemoteStoreBaseIntegTestCase.java | 2 +- .../remotestore/RemoteStoreRestoreIT.java | 36 ++++++++-------- .../opensearch/index/shard/StoreRecovery.java | 20 ++++++--- .../org/opensearch/index/store/Store.java | 15 +++++-- .../index/translog/TranslogHeader.java | 32 ++++++++------ 
.../opensearch/index/store/StoreTests.java | 37 ++++++++++++++++ .../index/translog/TranslogHeaderTests.java | 43 +++++++++++++++++++ 7 files changed, 143 insertions(+), 42 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java index f81edc1ff0e4d..90efafe9423c6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java @@ -72,7 +72,7 @@ protected Map indexData(int numberOfIterations, boolean invokeFlus Map indexingStats = new HashMap<>(); for (int i = 0; i < numberOfIterations; i++) { if (invokeFlush) { - flush(index); + flushAndRefresh(index); } else { refresh(index); } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java index 507ab40084355..e9d8933961073 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java @@ -38,7 +38,6 @@ public class RemoteStoreRestoreIT extends RemoteStoreBaseIntegTestCase { private static final String TOTAL_OPERATIONS = "total-operations"; private static final String REFRESHED_OR_FLUSHED_OPERATIONS = "refreshed-or-flushed-operations"; private static final String MAX_SEQ_NO_TOTAL = "max-seq-no-total"; - private static final String MAX_SEQ_NO_REFRESHED_OR_FLUSHED = "max-seq-no-refreshed-or-flushed"; @Override public Settings indexSettings() { @@ -68,18 +67,18 @@ private void restore(String... indices) { ); } - private void verifyRestoredData(Map indexStats, boolean checkTotal, String indexName) { + private void verifyRestoredData(Map indexStats, String indexName) { // This is required to get updated number from already active shards which were not restored refresh(indexName); - String statsGranularity = checkTotal ? TOTAL_OPERATIONS : REFRESHED_OR_FLUSHED_OPERATIONS; - String maxSeqNoGranularity = checkTotal ? MAX_SEQ_NO_TOTAL : MAX_SEQ_NO_REFRESHED_OR_FLUSHED; ensureYellowAndNoInitializingShards(indexName); ensureGreen(indexName); - assertHitCount(client().prepareSearch(indexName).setSize(0).get(), indexStats.get(statsGranularity)); + assertHitCount(client().prepareSearch(indexName).setSize(0).get(), indexStats.get(TOTAL_OPERATIONS)); IndexResponse response = indexSingleDoc(indexName); - assertEquals(indexStats.get(maxSeqNoGranularity + "-shard-" + response.getShardId().id()) + 1, response.getSeqNo()); + if (indexStats.containsKey(MAX_SEQ_NO_TOTAL + "-shard-" + response.getShardId().id())) { + assertEquals(indexStats.get(MAX_SEQ_NO_TOTAL + "-shard-" + response.getShardId().id()) + 1, response.getSeqNo()); + } refresh(indexName); - assertHitCount(client().prepareSearch(indexName).setSize(0).get(), indexStats.get(statsGranularity) + 1); + assertHitCount(client().prepareSearch(indexName).setSize(0).get(), indexStats.get(TOTAL_OPERATIONS) + 1); } private void prepareCluster(int numClusterManagerNodes, int numDataOnlyNodes, String indices, int replicaCount, int shardCount) { @@ -96,7 +95,6 @@ private void prepareCluster(int numClusterManagerNodes, int numDataOnlyNodes, St * Simulates all data restored using Remote Translog Store. * @throws IOException IO Exception. 
*/ - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/6188") public void testRemoteTranslogRestoreWithNoDataPostCommit() throws IOException { testRestoreFlow(1, true, randomIntBetween(1, 5)); } @@ -131,7 +129,6 @@ public void testRemoteTranslogRestoreWithCommittedData() throws IOException { * Simulates all data restored using Remote Translog Store. * @throws IOException IO Exception. */ - // @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/6188") @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8479") public void testRTSRestoreWithNoDataPostCommitPrimaryReplicaDown() throws IOException { testRestoreFlowBothPrimaryReplicasDown(1, true, randomIntBetween(1, 5)); @@ -172,7 +169,7 @@ private void restoreAndVerify(int shardCount, int replicaCount, Map indexStats = indexData(numberOfIterations, invokeFlush, INDEX_NAME); assertEquals(shardCount, getNumShards(INDEX_NAME).totalNumShards); + assertHitCount(client().prepareSearch(INDEX_NAME).setSize(0).get(), indexStats.get(REFRESHED_OR_FLUSHED_OPERATIONS)); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNodeName(INDEX_NAME))); ensureRed(INDEX_NAME); @@ -256,7 +255,7 @@ private void testRestoreFlowMultipleIndices(int numberOfIterations, boolean invo ensureGreen(indices); for (String index : indices) { assertEquals(shardCount, getNumShards(index).totalNumShards); - verifyRestoredData(indicesStats.get(index), true, index); + verifyRestoredData(indicesStats.get(index), index); } } @@ -288,7 +287,7 @@ public void testRestoreFlowNoRedIndex() { ensureGreen(INDEX_NAME); assertEquals(shardCount, getNumShards(INDEX_NAME).totalNumShards); - verifyRestoredData(indexStats, true, INDEX_NAME); + verifyRestoredData(indexStats, INDEX_NAME); } /** @@ -340,7 +339,7 @@ public void testRTSRestoreWithCommittedDataDefaultAllIndices() throws IOExceptio for (String index : indices) { assertEquals(shardCount, getNumShards(index).totalNumShards); - verifyRestoredData(indicesStats.get(index), true, index); + verifyRestoredData(indicesStats.get(index), index); } } @@ -384,9 +383,9 @@ public void testRTSRestoreWithCommittedDataNotAllRedRemoteIndices() throws IOExc ); ensureGreen(indices[0], indices[1]); assertEquals(shardCount, getNumShards(indices[0]).totalNumShards); - verifyRestoredData(indicesStats.get(indices[0]), true, indices[0]); + verifyRestoredData(indicesStats.get(indices[0]), indices[0]); assertEquals(shardCount, getNumShards(indices[1]).totalNumShards); - verifyRestoredData(indicesStats.get(indices[1]), true, indices[1]); + verifyRestoredData(indicesStats.get(indices[1]), indices[1]); ensureRed(indices[2], indices[3]); } @@ -436,9 +435,9 @@ public void testRTSRestoreWithCommittedDataExcludeIndicesPatterns() throws IOExc ); ensureGreen(indices[0], indices[1]); assertEquals(shardCount, getNumShards(indices[0]).totalNumShards); - verifyRestoredData(indicesStats.get(indices[0]), true, indices[0]); + verifyRestoredData(indicesStats.get(indices[0]), indices[0]); assertEquals(shardCount, getNumShards(indices[1]).totalNumShards); - verifyRestoredData(indicesStats.get(indices[1]), true, indices[1]); + verifyRestoredData(indicesStats.get(indices[1]), indices[1]); ensureRed(indices[2], indices[3]); } @@ -447,8 +446,7 @@ public void testRTSRestoreWithCommittedDataExcludeIndicesPatterns() throws IOExc * when the index has no data. * @throws IOException IO Exception. 
*/ - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/6188") - public void testRTSRestoreNoData() throws IOException { + public void testRTSRestoreDataOnlyInTranslog() throws IOException { testRestoreFlow(0, true, randomIntBetween(1, 5)); } diff --git a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java index b565ddd6c819a..d0c083390ab70 100644 --- a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java @@ -64,7 +64,9 @@ import org.opensearch.index.store.RemoteSegmentStoreDirectory; import org.opensearch.index.store.RemoteSegmentStoreDirectoryFactory; import org.opensearch.index.store.Store; +import org.opensearch.index.translog.Checkpoint; import org.opensearch.index.translog.Translog; +import org.opensearch.index.translog.TranslogHeader; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.replication.common.ReplicationLuceneIndex; import org.opensearch.repositories.IndexId; @@ -74,6 +76,8 @@ import java.io.IOException; import java.nio.channels.FileChannel; +import java.nio.file.Path; +import java.nio.file.StandardOpenOption; import java.util.Arrays; import java.util.HashMap; import java.util.List; @@ -83,6 +87,7 @@ import java.util.stream.Collectors; import static org.opensearch.common.unit.TimeValue.timeValueMillis; +import static org.opensearch.index.translog.Translog.CHECKPOINT_FILE_NAME; /** * This package private utility class encapsulates the logic to recover an index shard from either an existing index on @@ -532,13 +537,16 @@ private void recoverFromRemoteStore(IndexShard indexShard) throws IndexShardReco // Download segments from remote segment store indexShard.syncSegmentsFromRemoteSegmentStore(true, true); + indexShard.syncTranslogFilesFromRemoteTranslog(); + if (store.directory().listAll().length == 0) { - store.createEmpty(indexShard.indexSettings().getIndexVersionCreated().luceneVersion); - } - if (indexShard.indexSettings.isRemoteTranslogStoreEnabled()) { - indexShard.syncTranslogFilesFromRemoteTranslog(); - } else { - bootstrap(indexShard, store); + Path location = indexShard.shardPath().resolveTranslog(); + Checkpoint checkpoint = Checkpoint.read(location.resolve(CHECKPOINT_FILE_NAME)); + final Path translogFile = location.resolve(Translog.getFilename(checkpoint.getGeneration())); + try (FileChannel channel = FileChannel.open(translogFile, StandardOpenOption.READ)) { + TranslogHeader translogHeader = TranslogHeader.read(translogFile, channel); + store.createEmpty(indexShard.indexSettings().getIndexVersionCreated().luceneVersion, translogHeader.getTranslogUUID()); + } } assert indexShard.shardRouting.primary() : "only primary shards can recover from store"; diff --git a/server/src/main/java/org/opensearch/index/store/Store.java b/server/src/main/java/org/opensearch/index/store/Store.java index 16059e0604072..4f51994a6ac2f 100644 --- a/server/src/main/java/org/opensearch/index/store/Store.java +++ b/server/src/main/java/org/opensearch/index/store/Store.java @@ -1747,13 +1747,13 @@ public void accept(ShardLock Lock) {} }; } - /** - * creates an empty lucene index and a corresponding empty translog. Any existing data will be deleted. 
- */ - public void createEmpty(Version luceneVersion) throws IOException { + public void createEmpty(Version luceneVersion, String translogUUID) throws IOException { metadataLock.writeLock().lock(); try (IndexWriter writer = newEmptyIndexWriter(directory, luceneVersion)) { final Map map = new HashMap<>(); + if (translogUUID != null) { + map.put(Translog.TRANSLOG_UUID_KEY, translogUUID); + } map.put(Engine.HISTORY_UUID_KEY, UUIDs.randomBase64UUID()); map.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, Long.toString(SequenceNumbers.NO_OPS_PERFORMED)); map.put(SequenceNumbers.MAX_SEQ_NO, Long.toString(SequenceNumbers.NO_OPS_PERFORMED)); @@ -1764,6 +1764,13 @@ public void createEmpty(Version luceneVersion) throws IOException { } } + /** + * creates an empty lucene index and a corresponding empty translog. Any existing data will be deleted. + */ + public void createEmpty(Version luceneVersion) throws IOException { + createEmpty(luceneVersion, null); + } + /** * Marks an existing lucene index with a new history uuid. * This is used to make sure no existing shard will recovery from this index using ops based recovery. diff --git a/server/src/main/java/org/opensearch/index/translog/TranslogHeader.java b/server/src/main/java/org/opensearch/index/translog/TranslogHeader.java index 1090a994bf6ad..42bda11d75783 100644 --- a/server/src/main/java/org/opensearch/index/translog/TranslogHeader.java +++ b/server/src/main/java/org/opensearch/index/translog/TranslogHeader.java @@ -57,7 +57,7 @@ * * @opensearch.internal */ -final class TranslogHeader { +public final class TranslogHeader { public static final String TRANSLOG_CODEC = "translog"; public static final int VERSION_CHECKSUMS = 1; // pre-2.0 - unsupported @@ -137,9 +137,26 @@ static int readHeaderVersion(final Path path, final FileChannel channel, final S } /** - * Read a translog header from the given path and file channel + * Read a translog header from the given path and file channel and compare the given UUID */ static TranslogHeader read(final String translogUUID, final Path path, final FileChannel channel) throws IOException { + TranslogHeader translogHeader = read(path, channel); + // verify UUID only after checksum, to ensure that UUID is not corrupted + final BytesRef expectedUUID = new BytesRef(translogUUID); + final BytesRef actualUUID = new BytesRef(translogHeader.translogUUID); + if (actualUUID.bytesEquals(expectedUUID) == false) { + throw new TranslogCorruptedException( + path.toString(), + "expected shard UUID " + expectedUUID + " but got: " + actualUUID + " this translog file belongs to a different translog" + ); + } + return translogHeader; + } + + /** + * Read a translog header from the given path and file channel and compare the given UUID + */ + public static TranslogHeader read(final Path path, final FileChannel channel) throws IOException { try { // This input is intentionally not closed because closing it will close the FileChannel. 
final BufferedChecksumStreamInput in = new BufferedChecksumStreamInput( @@ -179,16 +196,7 @@ static TranslogHeader read(final String translogUUID, final Path path, final Fil + channel.position() + "]"; - // verify UUID only after checksum, to ensure that UUID is not corrupted - final BytesRef expectedUUID = new BytesRef(translogUUID); - if (uuid.bytesEquals(expectedUUID) == false) { - throw new TranslogCorruptedException( - path.toString(), - "expected shard UUID " + expectedUUID + " but got: " + uuid + " this translog file belongs to a different translog" - ); - } - - return new TranslogHeader(translogUUID, primaryTerm, headerSizeInBytes); + return new TranslogHeader(uuid.utf8ToString(), primaryTerm, headerSizeInBytes); } catch (EOFException e) { throw new TranslogCorruptedException(path.toString(), "translog header truncated", e); } diff --git a/server/src/test/java/org/opensearch/index/store/StoreTests.java b/server/src/test/java/org/opensearch/index/store/StoreTests.java index 957a3bdb08501..8395b3e8ac08e 100644 --- a/server/src/test/java/org/opensearch/index/store/StoreTests.java +++ b/server/src/test/java/org/opensearch/index/store/StoreTests.java @@ -84,6 +84,7 @@ import org.opensearch.index.seqno.ReplicationTracker; import org.opensearch.index.seqno.RetentionLease; import org.opensearch.index.seqno.SequenceNumbers; +import org.opensearch.index.translog.Translog; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.indices.store.TransportNodesListShardStoreMetadata; import org.opensearch.test.DummyShardLock; @@ -1166,6 +1167,42 @@ public void testGetMetadataWithSegmentInfos() throws IOException { store.close(); } + public void testCreateEmptyStore() throws IOException { + final ShardId shardId = new ShardId("index", "_na_", 1); + Store store = new Store(shardId, INDEX_SETTINGS, new NIOFSDirectory(createTempDir()), new DummyShardLock(shardId)); + store.createEmpty(Version.LATEST); + SegmentInfos segmentInfos = Lucene.readSegmentInfos(store.directory()); + assertFalse(segmentInfos.getUserData().containsKey(Translog.TRANSLOG_UUID_KEY)); + testDefaultUserData(segmentInfos); + store.close(); + } + + public void testCreateEmptyStoreWithTranlogUUID() throws IOException { + final ShardId shardId = new ShardId("index", "_na_", 1); + Store store = new Store(shardId, INDEX_SETTINGS, new NIOFSDirectory(createTempDir()), new DummyShardLock(shardId)); + store.createEmpty(Version.LATEST, "dummy-translog-UUID"); + SegmentInfos segmentInfos = Lucene.readSegmentInfos(store.directory()); + assertEquals("dummy-translog-UUID", segmentInfos.getUserData().get(Translog.TRANSLOG_UUID_KEY)); + testDefaultUserData(segmentInfos); + store.close(); + } + + public void testCreateEmptyWithNullTranlogUUID() throws IOException { + final ShardId shardId = new ShardId("index", "_na_", 1); + Store store = new Store(shardId, INDEX_SETTINGS, new NIOFSDirectory(createTempDir()), new DummyShardLock(shardId)); + store.createEmpty(Version.LATEST, null); + SegmentInfos segmentInfos = Lucene.readSegmentInfos(store.directory()); + assertFalse(segmentInfos.getUserData().containsKey(Translog.TRANSLOG_UUID_KEY)); + testDefaultUserData(segmentInfos); + store.close(); + } + + private void testDefaultUserData(SegmentInfos segmentInfos) { + assertEquals("-1", segmentInfos.getUserData().get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)); + assertEquals("-1", segmentInfos.getUserData().get(SequenceNumbers.MAX_SEQ_NO)); + assertEquals("-1", 
segmentInfos.getUserData().get(Engine.MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID)); + } + public void testGetSegmentMetadataMap() throws IOException { final ShardId shardId = new ShardId("index", "_na_", 1); Store store = new Store( diff --git a/server/src/test/java/org/opensearch/index/translog/TranslogHeaderTests.java b/server/src/test/java/org/opensearch/index/translog/TranslogHeaderTests.java index 4441e30ea639d..a5d6ee7a06e23 100644 --- a/server/src/test/java/org/opensearch/index/translog/TranslogHeaderTests.java +++ b/server/src/test/java/org/opensearch/index/translog/TranslogHeaderTests.java @@ -132,6 +132,49 @@ public void testHeaderWithoutPrimaryTerm() throws Exception { }); } + public void testCurrentHeaderVersionWithoutUUIDComparison() throws Exception { + final String translogUUID = UUIDs.randomBase64UUID(); + final TranslogHeader outHeader = new TranslogHeader(translogUUID, randomNonNegativeLong()); + final long generation = randomNonNegativeLong(); + final Path translogFile = createTempDir().resolve(Translog.getFilename(generation)); + try (FileChannel channel = FileChannel.open(translogFile, StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE)) { + outHeader.write(channel, true); + assertThat(outHeader.sizeInBytes(), equalTo((int) channel.position())); + } + try (FileChannel channel = FileChannel.open(translogFile, StandardOpenOption.READ)) { + final TranslogHeader inHeader = TranslogHeader.read(translogFile, channel); + assertThat(inHeader.getTranslogUUID(), equalTo(translogUUID)); + assertThat(inHeader.getPrimaryTerm(), equalTo(outHeader.getPrimaryTerm())); + assertThat(inHeader.sizeInBytes(), equalTo((int) channel.position())); + } + + TestTranslog.corruptFile(logger, random(), translogFile, false); + final TranslogCorruptedException corruption = expectThrows(TranslogCorruptedException.class, () -> { + try (FileChannel channel = FileChannel.open(translogFile, StandardOpenOption.READ)) { + final TranslogHeader translogHeader = TranslogHeader.read(translogFile, channel); + assertThat( + "version " + TranslogHeader.VERSION_CHECKPOINTS + " translog", + translogHeader.getPrimaryTerm(), + equalTo(SequenceNumbers.UNASSIGNED_PRIMARY_TERM) + ); + throw new TranslogCorruptedException(translogFile.toString(), "adjusted translog version"); + } catch (IllegalStateException e) { + // corruption corrupted the version byte making this look like a v2, v1 or v0 translog + assertThat( + "version " + TranslogHeader.VERSION_CHECKPOINTS + "-or-earlier translog", + e.getMessage(), + anyOf( + containsString("pre-2.0 translog found"), + containsString("pre-1.4 translog found"), + containsString("pre-6.3 translog found") + ) + ); + throw new TranslogCorruptedException(translogFile.toString(), "adjusted translog version", e); + } + }); + assertThat(corruption.getMessage(), not(containsString("this translog file belongs to a different translog"))); + } + static void writeHeaderWithoutTerm(FileChannel channel, String translogUUID) throws IOException { final OutputStreamStreamOutput out = new OutputStreamStreamOutput(Channels.newOutputStream(channel)); CodecUtil.writeHeader(new OutputStreamDataOutput(out), TranslogHeader.TRANSLOG_CODEC, TranslogHeader.VERSION_CHECKPOINTS); From 980bf3cf8d3a4391c9f10bff34f0d0b68046a4d8 Mon Sep 17 00:00:00 2001 From: panguixin Date: Wed, 23 Aug 2023 22:03:04 +0800 Subject: [PATCH 11/30] [BUG] Reconstruct pit infos when deserialize GetAllPitNodesResponse (#9410) * [BUG] Reconstruct pit infos when deserialize GetAllPitNodesResponse Signed-off-by: panguixin * add 
serialization test case

Signed-off-by: panguixin

* run spotless

Signed-off-by: panguixin

---------

Signed-off-by: panguixin
---
 .../action/search/GetAllPitNodesResponse.java | 6 ++
 .../opensearch/action/search/ListPitInfo.java | 14 +++
 .../search/GetAllPitNodesResponseTests.java | 98 +++++++++++++++++++
 3 files changed, 118 insertions(+)
 create mode 100644 server/src/test/java/org/opensearch/action/search/GetAllPitNodesResponseTests.java

diff --git a/server/src/main/java/org/opensearch/action/search/GetAllPitNodesResponse.java b/server/src/main/java/org/opensearch/action/search/GetAllPitNodesResponse.java
index 055eb84ab3811..9bb3ab6407696 100644
--- a/server/src/main/java/org/opensearch/action/search/GetAllPitNodesResponse.java
+++ b/server/src/main/java/org/opensearch/action/search/GetAllPitNodesResponse.java
@@ -41,6 +41,12 @@ public class GetAllPitNodesResponse extends BaseNodesResponse<GetAllPitNodeResponse
     public GetAllPitNodesResponse(StreamInput in) throws IOException {
         super(in);
+        Set<String> uniquePitIds = new HashSet<>();
+        pitInfos.addAll(
+            getNodes().stream()
+                .flatMap(p -> p.getPitInfos().stream().filter(t -> uniquePitIds.add(t.getPitId())))
+                .collect(Collectors.toList())
+        );
     }

     public GetAllPitNodesResponse(

diff --git a/server/src/main/java/org/opensearch/action/search/ListPitInfo.java b/server/src/main/java/org/opensearch/action/search/ListPitInfo.java
index e120507f4d47a..220b7247517b9 100644
--- a/server/src/main/java/org/opensearch/action/search/ListPitInfo.java
+++ b/server/src/main/java/org/opensearch/action/search/ListPitInfo.java
@@ -17,6 +17,7 @@ import org.opensearch.core.xcontent.XContentBuilder;

 import java.io.IOException;
+import java.util.Objects;

 import static org.opensearch.core.xcontent.ConstructingObjectParser.constructorArg;
@@ -80,4 +81,17 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
         return builder;
     }

+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+        ListPitInfo that = (ListPitInfo) o;
+        return pitId.equals(that.pitId) && creationTime == that.creationTime && keepAlive == that.keepAlive;
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(pitId, creationTime, keepAlive);
+    }
+
 }

diff --git a/server/src/test/java/org/opensearch/action/search/GetAllPitNodesResponseTests.java b/server/src/test/java/org/opensearch/action/search/GetAllPitNodesResponseTests.java
new file mode 100644
index 0000000000000..882b397575e93
--- /dev/null
+++ b/server/src/test/java/org/opensearch/action/search/GetAllPitNodesResponseTests.java
@@ -0,0 +1,98 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */ + +package org.opensearch.action.search; + +import org.opensearch.Version; +import org.opensearch.action.FailedNodeException; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.transport.TransportException; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.emptySet; + +public class GetAllPitNodesResponseTests extends OpenSearchTestCase { + protected void assertEqualInstances(GetAllPitNodesResponse expected, GetAllPitNodesResponse actual) { + assertNotSame(expected, actual); + Set expectedPitInfos = new HashSet<>(expected.getPitInfos()); + Set actualPitInfos = new HashSet<>(actual.getPitInfos()); + assertEquals(expectedPitInfos, actualPitInfos); + + List expectedResponses = expected.getNodes(); + List actualResponses = actual.getNodes(); + assertEquals(expectedResponses.size(), actualResponses.size()); + for (int i = 0; i < expectedResponses.size(); i++) { + assertEquals(expectedResponses.get(i).getNode(), actualResponses.get(i).getNode()); + Set expectedNodePitInfos = new HashSet<>(expectedResponses.get(i).getPitInfos()); + Set actualNodePitInfos = new HashSet<>(actualResponses.get(i).getPitInfos()); + assertEquals(expectedNodePitInfos, actualNodePitInfos); + } + + List expectedFailures = expected.failures(); + List actualFailures = actual.failures(); + assertEquals(expectedFailures.size(), actualFailures.size()); + for (int i = 0; i < expectedFailures.size(); i++) { + assertEquals(expectedFailures.get(i).nodeId(), actualFailures.get(i).nodeId()); + assertEquals(expectedFailures.get(i).getMessage(), actualFailures.get(i).getMessage()); + assertEquals(expectedFailures.get(i).getCause().getClass(), actualFailures.get(i).getCause().getClass()); + } + } + + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry(Collections.emptyList()); + } + + public void testSerialization() throws IOException { + GetAllPitNodesResponse response = createTestItem(); + GetAllPitNodesResponse deserialized = copyWriteable(response, getNamedWriteableRegistry(), GetAllPitNodesResponse::new); + assertEqualInstances(response, deserialized); + } + + private GetAllPitNodesResponse createTestItem() { + int numNodes = randomIntBetween(1, 10); + int numPits = randomInt(10); + List candidatePitInfos = new ArrayList<>(numPits); + for (int i = 0; i < numNodes; i++) { + candidatePitInfos.add(new ListPitInfo(randomAlphaOfLength(10), randomLong(), randomLong())); + } + + List responses = new ArrayList<>(); + List failures = new ArrayList<>(); + for (int i = 0; i < numNodes; i++) { + DiscoveryNode node = new DiscoveryNode( + randomAlphaOfLength(10), + buildNewFakeTransportAddress(), + emptyMap(), + emptySet(), + Version.CURRENT + ); + if (randomBoolean()) { + List nodePitInfos = new ArrayList<>(); + for (int j = 0; j < randomInt(numPits); j++) { + nodePitInfos.add(randomFrom(candidatePitInfos)); + } + responses.add(new GetAllPitNodeResponse(node, nodePitInfos)); + } else { + failures.add( + new FailedNodeException(node.getId(), randomAlphaOfLength(10), new TransportException(randomAlphaOfLength(10))) + ); + } + } + return new GetAllPitNodesResponse(new ClusterName("test"), responses, failures); + } +} 
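Aside on the fix above: the rebuilt `pitInfos` list in the `StreamInput` constructor relies on a stateful `HashSet` filter to drop PITs reported by more than one node. A minimal, self-contained sketch of that pattern (the `PitInfo` record below is a hypothetical stand-in for `ListPitInfo`, not the real class):

```java
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

public class PitDedupSketch {
    record PitInfo(String pitId, long creationTime, long keepAlive) {}

    static List<PitInfo> mergeUnique(List<List<PitInfo>> perNodePitInfos) {
        Set<String> uniquePitIds = new HashSet<>();
        // Set.add returns false for an id that was already seen, so the filter
        // keeps only the first occurrence of each PIT across the node responses.
        return perNodePitInfos.stream()
            .flatMap(node -> node.stream().filter(p -> uniquePitIds.add(p.pitId())))
            .collect(Collectors.toList());
    }

    public static void main(String[] args) {
        List<PitInfo> node1 = List.of(new PitInfo("pit-1", 1L, 60L), new PitInfo("pit-2", 2L, 60L));
        List<PitInfo> node2 = List.of(new PitInfo("pit-1", 1L, 60L)); // same PIT seen on two nodes
        System.out.println(mergeUnique(List.of(node1, node2)).size()); // 2, not 3
    }
}
```

Note that the stateful filter is only safe on a sequential stream; a parallel stream would need a concurrent set.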
From d1678ba95da35db46a35404e87946f98f2eee1b4 Mon Sep 17 00:00:00 2001
From: Marc Handalian
Date: Wed, 23 Aug 2023 11:50:41 -0700
Subject: [PATCH 12/30] Fix flaky test SegmentReplicationWithRemoteStorePressureIT.testAddReplicaWhileWritesBlocked. (#9501)

This test fails on ensureGreen after adding a replica. It runs inside the
try-with-resources block that blocks operations. The block works by mocking
transport calls to prevent segrep from completing until released.

Fixed by moving the ensureGreen until after releasing blockOperations.

Also reduced the doc count that is used while indexing down from max 200.
Writes with the remote store version of this test take a much longer time to
execute when performed serially, and we don't need this many docs indexed to
create the needed checkpoints.

Signed-off-by: Marc Handalian
---
 .../org/opensearch/index/SegmentReplicationPressureIT.java | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/server/src/internalClusterTest/java/org/opensearch/index/SegmentReplicationPressureIT.java b/server/src/internalClusterTest/java/org/opensearch/index/SegmentReplicationPressureIT.java
index ea45e09acb011..883e539b74b68 100644
--- a/server/src/internalClusterTest/java/org/opensearch/index/SegmentReplicationPressureIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/index/SegmentReplicationPressureIT.java
@@ -139,7 +139,6 @@ public void testWritesRejected() throws Exception {
      * This test ensures that a replica can be added while the index is under write block.
      * Ensuring that only write requests are blocked.
      */
-    @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8887")
     public void testAddReplicaWhileWritesBlocked() throws Exception {
         final String primaryNode = internalCluster().startNode();
         createIndex(INDEX_NAME);
@@ -176,10 +175,10 @@ public void testAddReplicaWhileWritesBlocked() throws Exception {
                     .prepareUpdateSettings(INDEX_NAME)
                     .setSettings(Settings.builder().put(SETTING_NUMBER_OF_REPLICAS, 2))
             );
-            ensureGreen(INDEX_NAME);
             replicaNodes.add(replica_2);
-            waitForSearchableDocs(totalDocs.get(), replica_2);
         }
+        ensureGreen(INDEX_NAME);
+        waitForSearchableDocs(totalDocs.get(), replicaNodes);
         refresh(INDEX_NAME);
         // wait for the replicas to catch up after block is released.
assertReplicaCheckpointUpdated(primaryShard); @@ -347,7 +346,7 @@ private BulkResponse executeBulkRequest(List nodes, int docsPerBatch) { private int indexUntilCheckpointCount() { int total = 0; for (int i = 0; i < MAX_CHECKPOINTS_BEHIND; i++) { - final int numDocs = randomIntBetween(1, 100); + final int numDocs = randomIntBetween(1, 5); for (int j = 0; j < numDocs; ++j) { indexDoc(); } From b8fe8b2f4f7268f78b7f42c87adc3250ae38342d Mon Sep 17 00:00:00 2001 From: Sayali Gaikawad <61760125+gaiksaya@users.noreply.github.com> Date: Wed, 23 Aug 2023 13:15:49 -0700 Subject: [PATCH 13/30] Run compatibility check based on PR base branch (#9374) Signed-off-by: Sayali Gaikawad --- .github/workflows/check-compatibility.yml | 30 ++++++++++++++++------- 1 file changed, 21 insertions(+), 9 deletions(-) diff --git a/.github/workflows/check-compatibility.yml b/.github/workflows/check-compatibility.yml index d9c232e8b1015..b5f2ccbae6917 100644 --- a/.github/workflows/check-compatibility.yml +++ b/.github/workflows/check-compatibility.yml @@ -5,11 +5,15 @@ on: pull_request_target jobs: - build: + check-compatibility: if: github.repository == 'opensearch-project/OpenSearch' + permissions: + contents: read runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 + with: + ref: ${{ github.event.pull_request.head.sha }} - name: Run compatibility task run: ./gradlew checkCompatibility -i | tee $HOME/gradlew-check.out @@ -22,17 +26,25 @@ jobs: echo "### Skipped components" >> "${{ github.workspace }}/results.txt" && grep -e 'Skipped component' $HOME/gradlew-check.out | sed -e 's/Skipped component: \[\(.*\)\]/- \1/' >> "${{ github.workspace }}/results.txt" echo "### Compatible components" >> "${{ github.workspace }}/results.txt" && grep -e 'Compatible component' $HOME/gradlew-check.out | sed -e 's/Compatible component: \[\(.*\)\]/- \1/' >> "${{ github.workspace }}/results.txt" - - name: GitHub App token - id: github_app_token - uses: tibdex/github-app-token@v1.6.0 + - name: Upload results + uses: actions/upload-artifact@v3 + with: + name: results.txt + path: ${{ github.workspace }}/results.txt + + add-comment: + needs: [check-compatibility] + permissions: + pull-requests: write + runs-on: ubuntu-latest + steps: + - name: Download results + uses: actions/download-artifact@v3 with: - app_id: ${{ secrets.APP_ID }} - private_key: ${{ secrets.APP_PRIVATE_KEY }} - installation_id: 22958780 + name: results.txt - name: Add comment on the PR uses: peter-evans/create-or-update-comment@v3 with: - token: ${{ steps.github_app_token.outputs.token }} issue-number: ${{ github.event.number }} - body-path: "${{ github.workspace }}/results.txt" + body-path: results.txt From 89ccda9a3a0f8120c0a701094c759d55288bd0bd Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Wed, 23 Aug 2023 13:45:39 -0700 Subject: [PATCH 14/30] Add Michael Froh as a maintainer (#9463) Signed-off-by: Andrew Ross --- .github/CODEOWNERS | 2 +- MAINTAINERS.md | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 1a108c35429ae..440f735b190d1 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1 +1 @@ -* @reta @anasalkouz @andrross @reta @Bukhtawar @CEHENKLE @dblock @gbbafna @setiah @kartg @kotwanikunal @mch2 @nknize @owaiskazi19 @Rishikesh1159 @ryanbogan @saratvemulapalli @shwetathareja @dreamer-89 @tlfeng @VachaShah @dbwiddis @sachinpkale @sohami +* @reta @anasalkouz @andrross @reta @Bukhtawar @CEHENKLE @dblock @gbbafna @setiah @kartg @kotwanikunal @mch2 @nknize @owaiskazi19 @Rishikesh1159 
@ryanbogan @saratvemulapalli @shwetathareja @dreamer-89 @tlfeng @VachaShah @dbwiddis @sachinpkale @sohami @msfroh diff --git a/MAINTAINERS.md b/MAINTAINERS.md index cac17903b1925..e552dbdcd83f3 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -6,7 +6,7 @@ This document contains a list of maintainers in this repo. See [opensearch-proje | Maintainer | GitHub ID | Affiliation | |--------------------------| ------------------------------------------------------- | ----------- | -| Abbas Hussain | [abbashus](https://github.com/abbashus) | Meta | +| Abbas Hussain | [abbashus](https://github.com/abbashus) | Meta | | Anas Alkouz | [anasalkouz](https://github.com/anasalkouz) | Amazon | | Andrew Ross | [andrross](https://github.com/andrross) | Amazon | | Andriy Redko | [reta](https://github.com/reta) | Aiven | @@ -19,9 +19,10 @@ This document contains a list of maintainers in this repo. See [opensearch-proje | Kartik Ganesh | [kartg](https://github.com/kartg) | Amazon | | Kunal Kotwani | [kotwanikunal](https://github.com/kotwanikunal) | Amazon | | Marc Handalian | [mch2](https://github.com/mch2) | Amazon | +| Michael Froh | [msfroh](https://github.com/msfroh) | Amazon | | Nick Knize | [nknize](https://github.com/nknize) | Amazon | | Owais Kazi | [owaiskazi19](https://github.com/owaiskazi19) | Amazon | -| Rabi Panda | [adnapibar](https://github.com/adnapibar) | Independent | +| Rabi Panda | [adnapibar](https://github.com/adnapibar) | Independent | | Rishikesh Pasham | [Rishikesh1159](https://github.com/Rishikesh1159) | Amazon | | Ryan Bogan | [ryanbogan](https://github.com/ryanbogan) | Amazon | | Sachin Kale | [sachinpkale](https://github.com/sachinpkale) | Amazon | From 0c839c3e35b4086acb88a276de9d10ece637d318 Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Wed, 23 Aug 2023 14:42:12 -0700 Subject: [PATCH 15/30] Fix range reads in respository-s3 (#9516) The `readBlob(String, long, long)` method in the S3 repository has been broken since the upgrade to AWS SDK v2. The cause is that the SDK v2 returns the content range length details in a string formatting per the [RFC 9110][1] spec. For example: ``` bytes 0-100/200 ``` However, the code was attempting to parse it as: ``` bytes=0-100 ``` The fix here is to not parse this string at all and instead use `GetObjectResponse#contentLength`. Note that the incorrect format matches how a range is specified in a _request_ per the [byte ranges][2] section of the RFC and that is likely the source of the confusion. We lack any dedicated integration testing of this method so the bug was not caught by any tests. Additionally, the range read is only used by the searchable snapshot feature currently and we have no automated integration testing with the different repository implementations. One other complicating factor is that due to a fallback path that returns `Long.MAX_VALUE - 1` when the range is failed to be parsed, the bug will only manifest as a long overflow error when requesting past the first block and therefore wasn't caught with very simple manual testing. 
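To make the mismatch concrete, the following standalone sketch
(illustrative only, not the patched code; the `RangeExample` class and its
literal values are invented for this note) shows why a request-style
`bytes=start-end` pattern can never match the response's Content-Range
header, and how `GetObjectResponse#contentLength` avoids parsing entirely:

```
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import software.amazon.awssdk.services.s3.model.GetObjectResponse;

public final class RangeExample {
    // The request-style pattern the old parsing code expected: "bytes=0-100"
    private static final Pattern REQUEST_STYLE = Pattern.compile("^bytes=([0-9]+)-([0-9]+)$");

    public static void main(String[] args) {
        // Response-style Content-Range per RFC 9110: "bytes <first>-<last>/<size>"
        final String contentRange = "bytes 0-100/200";

        // Prints "false": the regex never matches, so the old code always
        // ended up in its Long.MAX_VALUE - 1 fallback for ranged reads.
        Matcher matcher = REQUEST_STYLE.matcher(contentRange);
        System.out.println(matcher.find());

        // The fix: skip header parsing and trust the SDK's parsed length.
        GetObjectResponse response = GetObjectResponse.builder()
            .contentLength(101L) // 101 bytes for the inclusive range 0-100
            .build();
        System.out.println(response.contentLength()); // 101
    }
}
```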
[1]: https://www.rfc-editor.org/rfc/rfc9110.html#name-content-range
[2]: https://www.rfc-editor.org/rfc/rfc9110.html#name-byte-ranges

Signed-off-by: Andrew Ross
---
 CHANGELOG.md                                  |  1 +
 .../hdfs/HdfsBlobStoreRepositoryTests.java    |  4 +++
 .../s3/S3RetryingInputStream.java             | 26 +-----------------
 .../repositories/s3/utils/HttpRangeUtils.java | 27 ++++++-------------
 .../s3/S3RetryingInputStreamTests.java        |  5 ++--
 ...earchBlobStoreRepositoryIntegTestCase.java | 21 +++++++++++++++
 6 files changed, 37 insertions(+), 47 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 199461fd93cd7..80721b0f1b3e2 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -161,6 +161,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 ### Fixed
 - Fix flaky ResourceAwareTasksTests.testBasicTaskResourceTracking test ([#8993](https://github.com/opensearch-project/OpenSearch/pull/8993))
 - Fix memory leak when using Zstd Dictionary ([#9403](https://github.com/opensearch-project/OpenSearch/pull/9403))
+- Fix range reads in repository-s3 ([9512](https://github.com/opensearch-project/OpenSearch/issues/9512))

 ### Security
diff --git a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsBlobStoreRepositoryTests.java b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsBlobStoreRepositoryTests.java
index 0df39636b8ffa..6ff18b20036a8 100644
--- a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsBlobStoreRepositoryTests.java
+++ b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsBlobStoreRepositoryTests.java
@@ -66,4 +66,8 @@ protected Settings repositorySettings() {
     protected Collection> nodePlugins() {
         return Collections.singletonList(HdfsPlugin.class);
     }
+
+    @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9513")
+    @Override
+    public void testReadRange() {}
 }
diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RetryingInputStream.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RetryingInputStream.java
index 6d41a72ac9af8..3a35f6135f28b 100644
--- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RetryingInputStream.java
+++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RetryingInputStream.java
@@ -40,7 +40,6 @@
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.opensearch.common.collect.Tuple;
 import org.opensearch.common.util.io.IOUtils;
 import org.opensearch.repositories.s3.utils.HttpRangeUtils;

@@ -120,7 +119,7 @@ private void openStream() throws IOException {
             );
             this.currentStreamLastOffset = Math.addExact(
                 Math.addExact(start, currentOffset),
-                getStreamLength(getObjectResponseInputStream.response())
+                getObjectResponseInputStream.response().contentLength()
             );
             this.currentStream = getObjectResponseInputStream;
             this.isStreamAborted.set(false);
@@ -134,29 +133,6 @@ private void openStream() throws IOException {
         }
     }

-    private long getStreamLength(final GetObjectResponse getObjectResponse) {
-        try {
-            // Returns the content range of the object if response contains the Content-Range header.
-            if (getObjectResponse.contentRange() != null) {
-                final Tuple s3ResponseRange = HttpRangeUtils.fromHttpRangeHeader(getObjectResponse.contentRange());
-                assert s3ResponseRange.v2() >= s3ResponseRange.v1() : s3ResponseRange.v2() + " vs " + s3ResponseRange.v1();
-                assert s3ResponseRange.v1() == start + currentOffset : "Content-Range start value ["
-                    + s3ResponseRange.v1()
-                    + "] exceeds start ["
-                    + start
-                    + "] + current offset ["
-                    + currentOffset
-                    + ']';
-                assert s3ResponseRange.v2() == end : "Content-Range end value [" + s3ResponseRange.v2() + "] exceeds end [" + end + ']';
-                return s3ResponseRange.v2() - s3ResponseRange.v1() + 1L;
-            }
-            return getObjectResponse.contentLength();
-        } catch (Exception e) {
-            assert false : e;
-            return Long.MAX_VALUE - 1L; // assume a large stream so that the underlying stream is aborted on closing, unless eof is reached
-        }
-    }
-
     @Override
     public int read() throws IOException {
         ensureOpen();
diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/utils/HttpRangeUtils.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/utils/HttpRangeUtils.java
index 97b9829124d0d..40aec7d52847b 100644
--- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/utils/HttpRangeUtils.java
+++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/utils/HttpRangeUtils.java
@@ -8,25 +8,14 @@

 package org.opensearch.repositories.s3.utils;

-import software.amazon.awssdk.core.exception.SdkException;
-
-import org.opensearch.common.collect.Tuple;
-
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-public class HttpRangeUtils {
-
-    private static final Pattern RANGE_PATTERN = Pattern.compile("^bytes=([0-9]+)-([0-9]+)$");
-
-    public static Tuple fromHttpRangeHeader(String headerValue) {
-        Matcher matcher = RANGE_PATTERN.matcher(headerValue);
-        if (!matcher.find()) {
-            throw SdkException.create("Regex match for Content-Range header {" + headerValue + "} failed", new RuntimeException());
-        }
-        return new Tuple<>(Long.parseLong(matcher.group(1)), Long.parseLong(matcher.group(2)));
-    }
-
+public final class HttpRangeUtils {
+
+    /**
+     * Provides a byte range string per RFC 9110
+     * @param start start position (inclusive)
+     * @param end end position (inclusive)
+     * @return A 'bytes=start-end' string
+     */
     public static String toHttpRangeHeader(long start, long end) {
         return "bytes=" + start + "-" + end;
     }
diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RetryingInputStreamTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RetryingInputStreamTests.java
index 8be1d72c95b15..b38d5119b4108 100644
--- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RetryingInputStreamTests.java
+++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RetryingInputStreamTests.java
@@ -38,7 +38,6 @@
 import software.amazon.awssdk.services.s3.model.GetObjectResponse;

 import org.opensearch.common.io.Streams;
-import org.opensearch.repositories.s3.utils.HttpRangeUtils;
 import org.opensearch.test.OpenSearchTestCase;

 import java.io.ByteArrayInputStream;
@@ -104,11 +103,11 @@ public void testRangeInputStreamIsAborted() throws IOException {
     }

     private S3RetryingInputStream createInputStream(final byte[] data, final Long start, final Long length) throws IOException {
-        long end = Math.addExact(start, length - 1);
+        final long end = Math.addExact(start, length - 1);
         final S3Client client = mock(S3Client.class);
         when(client.getObject(any(GetObjectRequest.class))).thenReturn(
             new ResponseInputStream<>(
-                GetObjectResponse.builder().contentLength(length).contentRange(HttpRangeUtils.toHttpRangeHeader(start, end)).build(),
+                GetObjectResponse.builder().contentLength(length).build(),
                 new ByteArrayInputStream(data, Math.toIntExact(start), Math.toIntExact(length))
             )
         );
diff --git a/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchBlobStoreRepositoryIntegTestCase.java b/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchBlobStoreRepositoryIntegTestCase.java
index 7dfe6781bf669..789858ca38fad 100644
--- a/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchBlobStoreRepositoryIntegTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchBlobStoreRepositoryIntegTestCase.java
@@ -165,6 +165,27 @@ public void testWriteRead() throws IOException {
         }
     }

+    public void testReadRange() throws IOException {
+        try (BlobStore store = newBlobStore()) {
+            final BlobContainer container = store.blobContainer(new BlobPath());
+            final byte[] data = randomBytes(4096);
+
+            // Pick a subrange starting somewhere between position 100 and 1000
+            // and ending somewhere between 100 bytes past that position and
+            // 100 bytes before the end
+            final int startOffset = randomIntBetween(100, 1000);
+            final int endOffset = randomIntBetween(startOffset + 100, data.length - 100);
+            final byte[] subrangeData = Arrays.copyOfRange(data, startOffset, endOffset);
+
+            writeBlob(container, "foobar", new BytesArray(data), randomBoolean());
+            try (InputStream stream = container.readBlob("foobar", startOffset, subrangeData.length)) {
+                final byte[] actual = stream.readAllBytes();
+                assertArrayEquals(subrangeData, actual);
+            }
+            container.delete();
+        }
+    }
+
     public void testList() throws IOException {
         try (BlobStore store = newBlobStore()) {
             final BlobContainer container = store.blobContainer(new BlobPath());

From c42ada8e6d63295684303d46492216f3a6f94049 Mon Sep 17 00:00:00 2001
From: Andrew Ross
Date: Wed, 23 Aug 2023 16:03:45 -0700
Subject: [PATCH 16/30] Handle null partSize in OnDemandBlockSnapshotIndexInput (#9470)

The `partSize()` value can be null if the underlying repository
implementation does not implement file chunking.
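As a standalone illustration of the guard this change introduces (a
minimal sketch; the nested `ByteSizeValue` record below is a simplified
stand-in for the real OpenSearch class, and `PartSizeExample` is invented
for this note):

```
public final class PartSizeExample {
    // Simplified stand-in for org.opensearch.core.common.unit.ByteSizeValue.
    record ByteSizeValue(long bytes) {
        long getBytes() {
            return bytes;
        }
    }

    // partSize may be null when the repository does not chunk files.
    static long effectivePartSize(ByteSizeValue partSize) {
        // Defaulting to Long.MAX_VALUE means blockStart / partSize is always
        // part 0, i.e. the whole file is treated as one single part.
        return partSize != null ? partSize.getBytes() : Long.MAX_VALUE;
    }

    public static void main(String[] args) {
        final long blockStart = 8L * 1024 * 1024; // a block 8 MiB into the file
        System.out.println(blockStart / effectivePartSize(null)); // 0: unchunked
        System.out.println(blockStart / effectivePartSize(new ByteSizeValue(1L << 20))); // 8: ninth 1 MiB part
    }
}
```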
Signed-off-by: Andrew Ross
---
 CHANGELOG.md                                  |  1 +
 .../file/OnDemandBlockSnapshotIndexInput.java | 20 ++++++---
 .../OnDemandBlockSnapshotIndexInputTests.java | 43 +++++++++++++++++--
 3 files changed, 54 insertions(+), 10 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 80721b0f1b3e2..5c284dbe8992f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -162,6 +162,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Fix flaky ResourceAwareTasksTests.testBasicTaskResourceTracking test ([#8993](https://github.com/opensearch-project/OpenSearch/pull/8993))
 - Fix memory leak when using Zstd Dictionary ([#9403](https://github.com/opensearch-project/OpenSearch/pull/9403))
 - Fix range reads in repository-s3 ([9512](https://github.com/opensearch-project/OpenSearch/issues/9512))
+- Handle null partSize in OnDemandBlockSnapshotIndexInput ([#9291](https://github.com/opensearch-project/OpenSearch/issues/9291))

 ### Security
diff --git a/server/src/main/java/org/opensearch/index/store/remote/file/OnDemandBlockSnapshotIndexInput.java b/server/src/main/java/org/opensearch/index/store/remote/file/OnDemandBlockSnapshotIndexInput.java
index b3f8ee9c1817e..7166e9aa482e3 100644
--- a/server/src/main/java/org/opensearch/index/store/remote/file/OnDemandBlockSnapshotIndexInput.java
+++ b/server/src/main/java/org/opensearch/index/store/remote/file/OnDemandBlockSnapshotIndexInput.java
@@ -8,8 +8,6 @@

 package org.opensearch.index.store.remote.file;

-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
 import org.apache.lucene.store.FSDirectory;
 import org.apache.lucene.store.IndexInput;
 import org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo;
@@ -26,8 +24,6 @@
  * @opensearch.internal
  */
 public class OnDemandBlockSnapshotIndexInput extends OnDemandBlockIndexInput {
-    private static final Logger logger = LogManager.getLogger(OnDemandBlockSnapshotIndexInput.class);
-
     /**
      * Where this class fetches IndexInput parts from
     */
@@ -48,7 +44,7 @@ public class OnDemandBlockSnapshotIndexInput extends OnDemandBlockIndexInput {
     protected final String fileName;

     /**
-     * part size in bytes
+     * Maximum size in bytes of snapshot file parts.
      */
     protected final long partSize;

@@ -104,7 +100,15 @@ public OnDemandBlockSnapshotIndexInput(
         super(builder);
         this.transferManager = transferManager;
         this.fileInfo = fileInfo;
-        this.partSize = fileInfo.partSize().getBytes();
+        if (fileInfo.partSize() != null) {
+            this.partSize = fileInfo.partSize().getBytes();
+        } else {
+            // Repository implementations can define a size at which to split files
+            // into multiple objects in the repository. If partSize() is null, then
+            // no splitting happens, so default to Long.MAX_VALUE here to have the
+            // same effect. See {@code BlobStoreRepository#chunkSize()}.
+            this.partSize = Long.MAX_VALUE;
+        }
         this.fileName = fileInfo.physicalName();
         this.directory = directory;
         this.originalFileSize = fileInfo.length();
@@ -131,6 +135,10 @@ protected IndexInput fetchBlock(int blockId) throws IOException {
         final long blockStart = getBlockStart(blockId);
         final long blockEnd = blockStart + getActualBlockSize(blockId);
+
+        // If the snapshot file is chunked, we must account for this by
+        // choosing the appropriate file part and updating the position
+        // accordingly.
         final int part = (int) (blockStart / partSize);
         final long partStart = part * partSize;
diff --git a/server/src/test/java/org/opensearch/index/store/remote/file/OnDemandBlockSnapshotIndexInputTests.java b/server/src/test/java/org/opensearch/index/store/remote/file/OnDemandBlockSnapshotIndexInputTests.java
index a04feea3bb8e5..2204124f1de4f 100644
--- a/server/src/test/java/org/opensearch/index/store/remote/file/OnDemandBlockSnapshotIndexInputTests.java
+++ b/server/src/test/java/org/opensearch/index/store/remote/file/OnDemandBlockSnapshotIndexInputTests.java
@@ -19,6 +19,8 @@
 import org.apache.lucene.store.SimpleFSLockFactory;
 import org.apache.lucene.util.Constants;
 import org.apache.lucene.util.Version;
+import org.opensearch.common.lucene.store.ByteArrayIndexInput;
+import org.opensearch.core.common.unit.ByteSizeUnit;
 import org.opensearch.core.common.unit.ByteSizeValue;
 import org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot;
 import org.opensearch.index.store.StoreFileMetadata;
@@ -31,9 +33,12 @@
 import java.io.IOException;
 import java.nio.file.Path;

+import static org.mockito.ArgumentMatchers.argThat;
 import static org.mockito.Mockito.any;
 import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;

 @ThreadLeakFilters(filters = CleanerDaemonThreadLeakFilter.class)
 public class OnDemandBlockSnapshotIndexInputTests extends OpenSearchTestCase {
@@ -43,7 +48,6 @@
     private static final String FILE_NAME = "File_Name";
     private static final String BLOCK_FILE_PREFIX = FILE_NAME;
     private static final boolean IS_CLONE = false;
-    private static final ByteSizeValue BYTE_SIZE_VALUE = new ByteSizeValue(1L);
     private static final int FILE_SIZE = 29360128;
     private TransferManager transferManager;
     private LockFactory lockFactory;
@@ -74,7 +78,38 @@ public void test4MBBlock() throws Exception {
         runAllTestsFor(22);
     }

-    public void runAllTestsFor(int blockSizeShift) throws Exception {
+    public void testChunkedRepository() throws IOException {
+        final long blockSize = new ByteSizeValue(1, ByteSizeUnit.KB).getBytes();
+        final long repositoryChunkSize = new ByteSizeValue(2, ByteSizeUnit.KB).getBytes();
+        final long fileSize = new ByteSizeValue(3, ByteSizeUnit.KB).getBytes();
+
+        when(transferManager.fetchBlob(any())).thenReturn(new ByteArrayIndexInput("test", new byte[(int) blockSize]));
+        try (
+            FSDirectory directory = new MMapDirectory(path, lockFactory);
+            IndexInput indexInput = new OnDemandBlockSnapshotIndexInput(
+                OnDemandBlockIndexInput.builder()
+                    .resourceDescription(RESOURCE_DESCRIPTION)
+                    .offset(BLOCK_SNAPSHOT_FILE_OFFSET)
+                    .length(FILE_SIZE)
+                    .blockSizeShift((int) (Math.log(blockSize) / Math.log(2)))
+                    .isClone(IS_CLONE),
+                new BlobStoreIndexShardSnapshot.FileInfo(
+                    FILE_NAME,
+                    new StoreFileMetadata(FILE_NAME, fileSize, "", Version.LATEST),
+                    new ByteSizeValue(repositoryChunkSize)
+                ),
+                directory,
+                transferManager
+            )
+        ) {
+            // Seek to the position past the first repository chunk
+            indexInput.seek(repositoryChunkSize);
+        }
+        // Verify the second chunk is requested (i.e. ".part1")
+        verify(transferManager).fetchBlob(argThat(request -> request.getBlobName().equals("File_Name.part1")));
+    }
+
+    private void runAllTestsFor(int blockSizeShift) throws Exception {
         final OnDemandBlockSnapshotIndexInput blockedSnapshotFile = createOnDemandBlockSnapshotIndexInput(blockSizeShift);
         final int blockSize = 1 << blockSizeShift;
         TestGroup.testGetBlock(blockedSnapshotFile, blockSize, FILE_SIZE);
@@ -106,7 +141,7 @@ private OnDemandBlockSnapshotIndexInput createOnDemandBlockSnapshotIndexInput(in
         fileInfo = new BlobStoreIndexShardSnapshot.FileInfo(
             FILE_NAME,
             new StoreFileMetadata(FILE_NAME, FILE_SIZE, "", Version.LATEST),
-            BYTE_SIZE_VALUE
+            null
         );

         int blockSize = 1 << blockSizeShift;
@@ -182,7 +217,7 @@ private void initBlockFiles(int blockSize, FSDirectory fsDirectory) {

     }

-    public static class TestGroup {
+    private static class TestGroup {

         public static void testGetBlock(OnDemandBlockSnapshotIndexInput blockedSnapshotFile, int blockSize, int fileSize) {
             // block 0

From f5a6e6de4666db626e287f37a15967a753ea907a Mon Sep 17 00:00:00 2001
From: Andriy Redko
Date: Wed, 23 Aug 2023 19:25:34 -0400
Subject: [PATCH 17/30] Update to Gradle 8.3 (#8896)

Signed-off-by: Andriy Redko
---
 .../gradle/test/RestIntegTestTask.java       |   2 +-
 .../org/opensearch/gradle/test/TestTask.java |   2 +-
 .../StandaloneRestIntegTestTask.java         |   2 +-
 gradle/wrapper/gradle-wrapper.jar            | Bin 63375 -> 63721 bytes
 gradle/wrapper/gradle-wrapper.properties     |   4 ++--
 gradlew                                      |   3 ++-
 6 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/RestIntegTestTask.java b/buildSrc/src/main/java/org/opensearch/gradle/test/RestIntegTestTask.java
index c09db55d0080c..aec31d02b9bee 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/test/RestIntegTestTask.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/test/RestIntegTestTask.java
@@ -44,7 +44,7 @@
  * conventional configured tasks of {@link RestIntegTestTask}
 */
 @CacheableTask
-public class RestIntegTestTask extends StandaloneRestIntegTestTask implements TestSuiteConventionMappings {
+public abstract class RestIntegTestTask extends StandaloneRestIntegTestTask implements TestSuiteConventionMappings {
     @SuppressWarnings("rawtypes")
     @Override
     public Task configure(Closure closure) {
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/TestTask.java b/buildSrc/src/main/java/org/opensearch/gradle/test/TestTask.java
index 837660d4618be..f7511a2ac7f1c 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/test/TestTask.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/test/TestTask.java
@@ -15,7 +15,7 @@

 import org.gradle.api.tasks.testing.Test;

 @CacheableTask
-public class TestTask extends Test implements TestSuiteConventionMappings {
+public abstract class TestTask extends Test implements TestSuiteConventionMappings {
     @SuppressWarnings("rawtypes")
     @Override
     public Task configure(Closure closure) {
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/StandaloneRestIntegTestTask.java b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/StandaloneRestIntegTestTask.java
index 214240f05558d..ddcbf77b0d5e6 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/StandaloneRestIntegTestTask.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/StandaloneRestIntegTestTask.java
@@ -62,7 +62,7 @@
  * {@link Nested} inputs.
 */
 @CacheableTask
-public class StandaloneRestIntegTestTask extends Test implements TestClustersAware, FileSystemOperationsAware {
+public abstract class StandaloneRestIntegTestTask extends Test implements TestClustersAware, FileSystemOperationsAware {
     private Collection clusters = new HashSet<>();
     private Closure beforeStart;

diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar
index 033e24c4cdf41af1ab109bc7f253b2b887023340..7f93135c49b765f8051ef9d0a6055ff8e46073d8 100644
GIT binary patch
delta 28216
[base85-encoded binary delta data omitted]
zFLZIvLj3>ra^Bag{(;Qo-yurSrwcX!i~(rtf)Z5wZem)zo4NoVYmnfj6#&r|Bw!~9 zV!K8M_3j~qo-a`WzwAJWS3&?3d(h<-5yX8zN~@GT(#HRJE;r&|R8PTpVB zD4!67cZ3cKy(0uH7l88bxQPD=xcT2f-^=2lfkM#boeF@j93*xxO8k%K_&?n5ig%6} z)Oybbz#aNK%-cN=p#R5TlXUF;SNMUB_@C9pf0~z${1?RfJMp;(LcsYH=<>k;@HP+n syvPdje?%w#=c($S<~7S8@>K@hkBTtwU;THn!}mQ03j*TT&VOqE4-{M+YybcN diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index e10ceefe2a012..f01f0a84a786a 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -11,7 +11,7 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-8.2.1-all.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-8.3-all.zip zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists -distributionSha256Sum=7c3ad722e9b0ce8205b91560fd6ce8296ac3eadf065672242fd73c06b8eeb6ee +distributionSha256Sum=bb09982fdf52718e4c7b25023d10df6d35a5fff969860bdf5a5bd27a3ab27a9e diff --git a/gradlew b/gradlew index fcb6fca147c0c..0adc8e1a53214 100755 --- a/gradlew +++ b/gradlew @@ -83,7 +83,8 @@ done # This is normally unused # shellcheck disable=SC2034 APP_BASE_NAME=${0##*/} -APP_HOME=$( cd "${APP_HOME:-./}" && pwd -P ) || exit +# Discard cd standard output in case $CDPATH is set (https://github.com/gradle/gradle/issues/25036) +APP_HOME=$( cd "${APP_HOME:-./}" > /dev/null && pwd -P ) || exit # Use the maximum available, or set MAX_FD != -1 to use that value. MAX_FD=maximum From a6c0bb30f93db1c0d5204211273650dfb6eda7d4 Mon Sep 17 00:00:00 2001 From: Neetika Singhal Date: Wed, 23 Aug 2023 22:38:38 -0700 Subject: [PATCH 18/30] Fix sort related ITs for concurrent search (#9466) Signed-off-by: Neetika Singhal --- CHANGELOG.md | 1 + .../opensearch/search/sort/FieldSortIT.java | 25 +++++++++++-- .../opensearch/search/sort/SimpleSortIT.java | 26 ++++++++++++-- .../BytesRefFieldComparatorSource.java | 6 ++-- .../DoubleValuesComparatorSource.java | 4 +-- .../search/sort/ScriptSortBuilder.java | 35 ++++++++++++++----- 6 files changed, 81 insertions(+), 16 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5c284dbe8992f..1cd63ce1c9276 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -152,6 +152,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add support for wrapping CollectorManager with profiling during concurrent execution ([#9129](https://github.com/opensearch-project/OpenSearch/pull/9129)) - Rethrow OpenSearch exception for non-concurrent path while using concurrent search ([#9177](https://github.com/opensearch-project/OpenSearch/pull/9177)) - Improve performance of encoding composite keys in multi-term aggregations ([#9412](https://github.com/opensearch-project/OpenSearch/pull/9412)) +- Fix sort related ITs for concurrent search ([#9177](https://github.com/opensearch-project/OpenSearch/pull/9466) ### Deprecated diff --git a/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java b/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java index 6a68304fd72ae..bee242b933dfd 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java @@ -32,6 +32,8 @@ package org.opensearch.search.sort; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.tests.util.TestUtil; import org.apache.lucene.util.BytesRef; import 
@@ -45,6 +47,7 @@
 import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.common.Numbers;
 import org.opensearch.common.settings.Settings;
+import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.common.xcontent.XContentFactory;
 import org.opensearch.core.rest.RestStatus;
 import org.opensearch.core.xcontent.MediaTypeRegistry;
@@ -60,7 +63,7 @@
 import org.opensearch.search.SearchHit;
 import org.opensearch.search.SearchHits;
 import org.opensearch.test.InternalSettingsPlugin;
-import org.opensearch.test.OpenSearchIntegTestCase;
+import org.opensearch.test.ParameterizedOpenSearchIntegTestCase;
 import org.hamcrest.Matchers;

 import java.io.IOException;
@@ -86,6 +89,7 @@
 import static org.opensearch.index.query.QueryBuilders.matchAllQuery;
 import static org.opensearch.index.query.functionscore.ScoreFunctionBuilders.fieldValueFactorFunction;
 import static org.opensearch.script.MockScriptPlugin.NAME;
+import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;
 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFirstHit;
 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount;
@@ -105,7 +109,24 @@
 import static org.hamcrest.Matchers.nullValue;
 import static org.hamcrest.Matchers.oneOf;

-public class FieldSortIT extends OpenSearchIntegTestCase {
+public class FieldSortIT extends ParameterizedOpenSearchIntegTestCase {
+    public FieldSortIT(Settings dynamicSettings) {
+        super(dynamicSettings);
+    }
+
+    @ParametersFactory
+    public static Collection<Object[]> parameters() {
+        return Arrays.asList(
+            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() },
+            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }
+        );
+    }
+
+    @Override
+    protected Settings featureFlagSettings() {
+        return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build();
+    }
+
     public static class CustomScriptPlugin extends MockScriptPlugin {
         @Override
         protected Map<String, Function<Map<String, Object>, Object>> pluginScripts() {
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/sort/SimpleSortIT.java b/server/src/internalClusterTest/java/org/opensearch/search/sort/SimpleSortIT.java
index 6f0f25b72852d..ddfbc3cce2be6 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/sort/SimpleSortIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/sort/SimpleSortIT.java
@@ -32,11 +32,15 @@
 package org.opensearch.search.sort;

+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
 import org.opensearch.action.index.IndexRequestBuilder;
 import org.opensearch.action.search.SearchResponse;
 import org.opensearch.action.search.ShardSearchFailure;
 import org.opensearch.common.geo.GeoPoint;
 import org.opensearch.common.geo.GeoUtils;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.index.fielddata.ScriptDocValues;
 import org.opensearch.plugins.Plugin;
 import org.opensearch.script.MockScriptPlugin;
@@ -45,7 +49,7 @@
 import org.opensearch.search.SearchHit;
 import org.opensearch.search.sort.ScriptSortBuilder.ScriptSortType;
 import org.opensearch.test.InternalSettingsPlugin;
-import org.opensearch.test.OpenSearchIntegTestCase;
+import org.opensearch.test.ParameterizedOpenSearchIntegTestCase;

 import java.io.IOException;
 import java.util.ArrayList;
@@ -61,6 +65,7 @@
 import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder;
 import static org.opensearch.index.query.QueryBuilders.matchAllQuery;
 import static org.opensearch.index.query.QueryBuilders.termQuery;
+import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;
 import static org.opensearch.search.sort.SortBuilders.scriptSort;
 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount;
@@ -70,10 +75,27 @@
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.not;

-public class SimpleSortIT extends OpenSearchIntegTestCase {
+public class SimpleSortIT extends ParameterizedOpenSearchIntegTestCase {

     private static final String DOUBLE_APOSTROPHE = "\u0027\u0027";

+    public SimpleSortIT(Settings dynamicSettings) {
+        super(dynamicSettings);
+    }
+
+    @ParametersFactory
+    public static Collection<Object[]> parameters() {
+        return Arrays.asList(
+            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() },
+            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }
+        );
+    }
+
+    @Override
+    protected Settings featureFlagSettings() {
+        return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build();
+    }
+
     @Override
     protected Collection<Class<? extends Plugin>> nodePlugins() {
         return Arrays.asList(CustomScriptPlugin.class, InternalSettingsPlugin.class);
diff --git a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/BytesRefFieldComparatorSource.java b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/BytesRefFieldComparatorSource.java
index 430a1f90ff3a4..3aa4d9cb782ca 100644
--- a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/BytesRefFieldComparatorSource.java
+++ b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/BytesRefFieldComparatorSource.java
@@ -91,7 +91,7 @@ protected SortedBinaryDocValues getValues(LeafReaderContext context) throws IOEx
         return indexFieldData.load(context).getBytesValues();
     }

-    protected void setScorer(Scorable scorer) {}
+    protected void setScorer(Scorable scorer, LeafReaderContext context) {}

     @Override
     public FieldComparator<?> newComparator(String fieldname, int numHits, boolean enableSkipping, boolean reversed) {
@@ -134,9 +134,11 @@ protected SortedDocValues getSortedDocValues(LeafReaderContext context, String f
         }

         return new FieldComparator.TermValComparator(numHits, null, sortMissingLast) {
+            LeafReaderContext leafReaderContext;

             @Override
             protected BinaryDocValues getBinaryDocValues(LeafReaderContext context, String field) throws IOException {
+                leafReaderContext = context;
                 final SortedBinaryDocValues values = getValues(context);
                 final BinaryDocValues selectedValues;
                 if (nested == null) {
@@ -152,7 +154,7 @@ protected BinaryDocValues getBinaryDocValues(LeafReaderContext context, String f

             @Override
             public void setScorer(Scorable scorer) {
-                BytesRefFieldComparatorSource.this.setScorer(scorer);
+                BytesRefFieldComparatorSource.this.setScorer(scorer, leafReaderContext);
             }

         };
diff --git a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/DoubleValuesComparatorSource.java b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/DoubleValuesComparatorSource.java
index 34e86070054c9..e70916c33882c 100644
--- a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/DoubleValuesComparatorSource.java
+++ b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/DoubleValuesComparatorSource.java
@@ -95,7 +95,7 @@ private NumericDoubleValues getNumericDocValues(LeafReaderContext context, doubl
         }
     }

-    protected void setScorer(Scorable scorer) {}
+    protected void setScorer(Scorable scorer, LeafReaderContext context) {}

     @Override
     public FieldComparator<?> newComparator(String fieldname, int numHits, boolean enableSkipping, boolean reversed) {
@@ -115,7 +115,7 @@ protected NumericDocValues getNumericDocValues(LeafReaderContext context, String

             @Override
             public void setScorer(Scorable scorer) {
-                DoubleValuesComparatorSource.this.setScorer(scorer);
+                DoubleValuesComparatorSource.this.setScorer(scorer, context);
             }
         };
     }
diff --git a/server/src/main/java/org/opensearch/search/sort/ScriptSortBuilder.java b/server/src/main/java/org/opensearch/search/sort/ScriptSortBuilder.java
index 343af749bcc68..bb1930eb3a953 100644
--- a/server/src/main/java/org/opensearch/search/sort/ScriptSortBuilder.java
+++ b/server/src/main/java/org/opensearch/search/sort/ScriptSortBuilder.java
@@ -69,8 +69,11 @@
 import org.opensearch.search.MultiValueMode;

 import java.io.IOException;
+import java.io.UncheckedIOException;
 import java.util.Locale;
+import java.util.Map;
 import java.util.Objects;
+import java.util.concurrent.ConcurrentHashMap;

 import static org.opensearch.core.xcontent.ConstructingObjectParser.constructorArg;
 import static org.opensearch.search.sort.FieldSortBuilder.validateMaxChildrenExistOnlyInTopLevelNestedSort;
@@ -355,11 +358,19 @@ private IndexFieldData.XFieldComparatorSource fieldComparatorSource(QueryShardCo
                 final StringSortScript.Factory factory = context.compile(script, StringSortScript.CONTEXT);
                 final StringSortScript.LeafFactory searchScript = factory.newFactory(script.getParams(), context.lookup());
                 return new BytesRefFieldComparatorSource(null, null, valueMode, nested) {
-                    StringSortScript leafScript;
+                    // introducing a map to keep a mapping between the leaf reader context and leaf script
+                    // such that the functions of the class are thread safe in case of concurrent search
+                    final Map<LeafReaderContext, StringSortScript> leafContextSortScriptMap = new ConcurrentHashMap<>();

                     @Override
                     protected SortedBinaryDocValues getValues(LeafReaderContext context) throws IOException {
-                        leafScript = searchScript.newInstance(context);
+                        final StringSortScript leafScript = leafContextSortScriptMap.computeIfAbsent(context, ctx -> {
+                            try {
+                                return searchScript.newInstance(ctx);
+                            } catch (IOException e) {
+                                throw new UncheckedIOException(e);
+                            }
+                        });
                         final BinaryDocValues values = new AbstractBinaryDocValues() {

                             final BytesRefBuilder spare = new BytesRefBuilder();
@@ -379,8 +390,8 @@ public BytesRef binaryValue() {
                     }

                     @Override
-                    protected void setScorer(Scorable scorer) {
-                        leafScript.setScorer(scorer);
+                    protected void setScorer(Scorable scorer, LeafReaderContext context) {
+                        leafContextSortScriptMap.get(context).setScorer(scorer);
                     }

                     @Override
@@ -403,11 +414,19 @@ public BucketedSort newBucketedSort(
                 final NumberSortScript.Factory numberSortFactory = context.compile(script, NumberSortScript.CONTEXT);
                 final NumberSortScript.LeafFactory numberSortScript = numberSortFactory.newFactory(script.getParams(), context.lookup());
                 return new DoubleValuesComparatorSource(null, Double.MAX_VALUE, valueMode, nested) {
-                    NumberSortScript leafScript;
+                    // introducing a map to keep a mapping between the leaf reader context and leaf script
+                    // such that the functions of the class are thread safe in case of concurrent search
+                    final Map<LeafReaderContext, NumberSortScript> leafContextSortScriptMap = new ConcurrentHashMap<>();

                     @Override
                     protected SortedNumericDoubleValues getValues(LeafReaderContext context) throws IOException {
-                        leafScript = numberSortScript.newInstance(context);
+                        final NumberSortScript leafScript = leafContextSortScriptMap.computeIfAbsent(context, ctx -> {
+                            try {
+                                return numberSortScript.newInstance(ctx);
+                            } catch (IOException e) {
+                                throw new UncheckedIOException(e);
+                            }
+                        });
                         final NumericDoubleValues values = new NumericDoubleValues() {
                             @Override
                             public boolean advanceExact(int doc) throws IOException {
@@ -424,8 +443,8 @@ public double doubleValue() {
                     }

                     @Override
-                    protected void setScorer(Scorable scorer) {
-                        leafScript.setScorer(scorer);
+                    protected void setScorer(Scorable scorer, LeafReaderContext context) {
+                        leafContextSortScriptMap.get(context).setScorer(scorer);
                     }
                 };
             default:
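The heart of the patch above is replacing a single mutable leafScript field, which concurrent segment search threads raced on, with one script instance per LeafReaderContext held in a ConcurrentHashMap. A minimal standalone sketch of the same pattern follows; LeafScript and LeafScriptFactory are hypothetical stand-ins for StringSortScript and its LeafFactory, not OpenSearch types:

    import java.io.IOException;
    import java.io.UncheckedIOException;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    interface LeafScript { void setScorer(Object scorer); }          // stand-in for StringSortScript
    interface LeafScriptFactory { LeafScript newInstance(Object leaf) throws IOException; }

    class PerLeafScripts {
        private final LeafScriptFactory factory;
        // One script per leaf; computeIfAbsent guarantees two threads asking
        // for the same leaf always observe the same instance.
        private final Map<Object, LeafScript> perLeaf = new ConcurrentHashMap<>();

        PerLeafScripts(LeafScriptFactory factory) {
            this.factory = factory;
        }

        LeafScript forLeaf(Object leaf) {
            return perLeaf.computeIfAbsent(leaf, l -> {
                try {
                    return factory.newInstance(l);
                } catch (IOException e) {
                    throw new UncheckedIOException(e); // same wrapping trick as the patch
                }
            });
        }
    }

This is also why setScorer gained a LeafReaderContext parameter in the comparator sources: each comparator must look up the script belonging to its own leaf rather than whichever instance happened to be created last.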
Total numbers of nodes: {}.", + nodesOverHighThreshold.size(), + nodes.size() + ); setIndexCreateBlock(listener, false); } else { listener.onResponse(null); diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/DiskThresholdMonitorTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/DiskThresholdMonitorTests.java index 3f54387d39579..6ab57d10b05c1 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/DiskThresholdMonitorTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/DiskThresholdMonitorTests.java @@ -55,10 +55,12 @@ import org.opensearch.test.MockLogAppender; import org.opensearch.test.junit.annotations.TestLogging; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; @@ -67,6 +69,7 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.function.LongSupplier; +import static org.opensearch.cluster.routing.allocation.DiskThresholdSettings.CLUSTER_CREATE_INDEX_BLOCK_AUTO_RELEASE; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.equalTo; @@ -581,12 +584,16 @@ protected void setIndexCreateBlock(ActionListener listener, boolean indexC ); advanceTime.set(false); // will do one reroute and emit warnings, but subsequent reroutes and associated messages are delayed - assertSingleWarningMessage( - monitor, - aboveHighWatermark, + final List messages = new ArrayList<>(); + messages.add( "high disk watermark [90%] exceeded on * shards will be relocated away from this node* " + "the node is expected to continue to exceed the high disk watermark when these relocations are complete" ); + messages.add( + "Putting index create block on cluster as all nodes are breaching high disk watermark. " + + "Number of nodes above high watermark: 1." 
+        );
+        assertMultipleWarningMessages(monitor, aboveHighWatermark, messages);

         advanceTime.set(true);
         assertRepeatedWarningMessages(
@@ -605,22 +612,11 @@ protected void setIndexCreateBlock(ActionListener<Void> listener, boolean indexC
         relocatingShardSizeRef.set(-5L);
         advanceTime.set(true);
-        assertSingleInfoMessage(
-            monitor,
-            aboveHighWatermark,
-            "high disk watermark [90%] exceeded on * shards will be relocated away from this node* "
-                + "the node is expected to be below the high disk watermark when these relocations are complete"
-        );

         relocatingShardSizeRef.set(0L);
         timeSupplier.getAsLong(); // advance time long enough to do another reroute
         advanceTime.set(false); // will do one reroute and emit warnings, but subsequent reroutes and associated messages are delayed
-        assertSingleWarningMessage(
-            monitor,
-            aboveHighWatermark,
-            "high disk watermark [90%] exceeded on * shards will be relocated away from this node* "
-                + "the node is expected to continue to exceed the high disk watermark when these relocations are complete"
-        );
+        assertMultipleWarningMessages(monitor, aboveHighWatermark, messages);

         advanceTime.set(true);
         assertRepeatedWarningMessages(
@@ -722,6 +718,113 @@ protected void setIndexCreateBlock(ActionListener<Void> listener, boolean indexC
         assertTrue(countBlocksCalled.get() == 0);
     }

+    public void testIndexCreateBlockRemovedOnlyWhenAnyNodeAboveHighWatermark() {
+        AllocationService allocation = createAllocationService(
+            Settings.builder()
+                .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
+                .put("cluster.blocks.create_index.enabled", false)
+                .build()
+        );
+        Metadata metadata = Metadata.builder()
+            .put(
+                IndexMetadata.builder("test")
+                    .settings(settings(Version.CURRENT).put("index.routing.allocation.require._id", "node2"))
+                    .numberOfShards(1)
+                    .numberOfReplicas(0)
+            )
+            .put(
+                IndexMetadata.builder("test_1")
+                    .settings(settings(Version.CURRENT).put("index.routing.allocation.require._id", "node1"))
+                    .numberOfShards(1)
+                    .numberOfReplicas(0)
+            )
+            .put(
+                IndexMetadata.builder("test_2")
+                    .settings(settings(Version.CURRENT).put("index.routing.allocation.require._id", "node1"))
+                    .numberOfShards(1)
+                    .numberOfReplicas(0)
+            )
+            .build();
+        RoutingTable routingTable = RoutingTable.builder()
+            .addAsNew(metadata.index("test"))
+            .addAsNew(metadata.index("test_1"))
+            .addAsNew(metadata.index("test_2"))
+            .build();
+
+        final ClusterState clusterState = applyStartedShardsUntilNoChange(
+            ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
+                .metadata(metadata)
+                .routingTable(routingTable)
+                .blocks(ClusterBlocks.builder().addGlobalBlock(Metadata.CLUSTER_CREATE_INDEX_BLOCK).build())
+                .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")))
+                .build(),
+            allocation
+        );
+        AtomicReference<Set<String>> indices = new AtomicReference<>();
+        AtomicInteger countBlocksCalled = new AtomicInteger();
+        AtomicInteger countUnblockBlocksCalled = new AtomicInteger();
+        AtomicLong currentTime = new AtomicLong();
+        Settings settings = Settings.builder().put(CLUSTER_CREATE_INDEX_BLOCK_AUTO_RELEASE.getKey(), true).build();
+        DiskThresholdMonitor monitor = new DiskThresholdMonitor(
+            settings,
+            () -> clusterState,
+            new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
+            null,
+            currentTime::get,
+            (reason, priority, listener) -> {
+                listener.onResponse(null);
+            }
+        ) {
+
+            @Override
+            protected void updateIndicesReadOnly(Set<String> indicesToMarkReadOnly, ActionListener<Void> listener, boolean readOnly) {
+                assertTrue(indices.compareAndSet(null, indicesToMarkReadOnly));
+                assertTrue(readOnly);
+                listener.onResponse(null);
+            }
+
+            @Override
+            protected void setIndexCreateBlock(ActionListener<Void> listener, boolean indexCreateBlock) {
+                if (indexCreateBlock == true) {
+                    countBlocksCalled.set(countBlocksCalled.get() + 1);
+                } else {
+                    countUnblockBlocksCalled.set(countUnblockBlocksCalled.get() + 1);
+                }
+
+                listener.onResponse(null);
+            }
+        };
+
+        Map<String, DiskUsage> builder = new HashMap<>();
+
+        // Initially all the nodes are breaching high watermark and IndexCreateBlock is already present on the cluster.
+        // Since block is already present, DiskThresholdMonitor should not again try to apply block.
+        builder.put("node1", new DiskUsage("node1", "node1", "/foo/bar", 100, 9));
+        builder.put("node2", new DiskUsage("node2", "node2", "/foo/bar", 100, 9));
+        monitor.onNewInfo(clusterInfo(builder));
+        // Since the block is already present and all nodes are still breaching the high watermark, neither block nor unblock will be called.
+        assertEquals(countBlocksCalled.get(), 0);
+        assertEquals(countUnblockBlocksCalled.get(), 0);
+
+        // Ensure DiskThresholdMonitor does not try to remove block in the next iteration if all nodes are breaching high watermark.
+        monitor.onNewInfo(clusterInfo(builder));
+        assertEquals(countBlocksCalled.get(), 0);
+        assertEquals(countUnblockBlocksCalled.get(), 0);
+
+        builder = new HashMap<>();
+
+        // If any node is no longer breaching high watermark, DiskThresholdMonitor should remove IndexCreateBlock.
+        builder.put("node1", new DiskUsage("node1", "node1", "/foo/bar", 100, 19));
+        builder.put("node2", new DiskUsage("node2", "node2", "/foo/bar", 100, 1));
+        // Need to add delay in current time to allow nodes to be removed from the high watermark list.
+        currentTime.addAndGet(randomLongBetween(60001, 120000));
+
+        monitor.onNewInfo(clusterInfo(builder));
+        // Block will be removed if any node is no longer breaching high watermark.
+        assertEquals(countBlocksCalled.get(), 0);
+        assertEquals(countUnblockBlocksCalled.get(), 1);
+    }
+
     private void assertNoLogging(DiskThresholdMonitor monitor, final Map<String, DiskUsage> diskUsages) throws IllegalAccessException {
         try (MockLogAppender mockAppender = MockLogAppender.createForLoggers(LogManager.getLogger(DiskThresholdMonitor.class))) {
             mockAppender.addExpectation(
@@ -756,10 +859,11 @@ private void assertRepeatedWarningMessages(DiskThresholdMonitor monitor, final M
         }
     }

-    private void assertSingleWarningMessage(DiskThresholdMonitor monitor, final Map<String, DiskUsage> diskUsages, String message)
+    private void assertMultipleWarningMessages(DiskThresholdMonitor monitor, final Map<String, DiskUsage> diskUsages, List<String> messages)
         throws IllegalAccessException {
-        assertLogging(monitor, diskUsages, Level.WARN, message);
-        assertNoLogging(monitor, diskUsages);
+        for (int index = 0; index < messages.size(); index++) {
+            assertLogging(monitor, diskUsages, Level.WARN, messages.get(index));
+        }
     }

     private void assertSingleInfoMessage(DiskThresholdMonitor monitor, final Map<String, DiskUsage> diskUsages, String message)
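The monitor change above reduces to a three-way decision: apply the cluster-wide index-create block only when every node breaches the high watermark, and auto-release it only once at least one node has dropped back below. A hedged sketch of just that decision (illustrative names, not the actual DiskThresholdMonitor API):

    // Returns TRUE to apply the block, FALSE to release it, null to leave it unchanged.
    static Boolean desiredBlockChange(boolean blockPresent, boolean autoRelease, int nodesOverHigh, int totalNodes) {
        if (!blockPresent && totalNodes > 0 && nodesOverHigh == totalNodes) {
            return Boolean.TRUE;   // every node is over the high watermark
        }
        if (blockPresent && autoRelease && nodesOverHigh < totalNodes) {
            return Boolean.FALSE;  // at least one node has headroom again
        }
        return null;
    }

Before the fix, the release branch checked only that the block was present and auto-release was enabled, so a block applied in one iteration could be removed in the next even though every node was still over the watermark.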
From 762c70bf3b0dd5cbaf2cac04d52dd8abded08704 Mon Sep 17 00:00:00 2001
From: Andrew Ross
Date: Thu, 24 Aug 2023 08:05:50 -0700
Subject: [PATCH 20/30] Implement range reads in HDFS repository (#9524)

Resolves #9513

Signed-off-by: Andrew Ross
---
 CHANGELOG.md                                  |  1 +
 .../repositories/hdfs/HdfsBlobContainer.java  | 21 +++++++++++++++++--
 .../hdfs/HdfsBlobStoreRepositoryTests.java    |  4 ----
 3 files changed, 20 insertions(+), 6 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index b49836ef73e8e..b0e902b1b97fa 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -89,6 +89,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - [BWC and API enforcement] Define the initial set of annotations, their meaning and relations between them ([#9223](https://github.com/opensearch-project/OpenSearch/pull/9223))
 - [Segment Replication] Support realtime reads for GET requests ([#9212](https://github.com/opensearch-project/OpenSearch/pull/9212))
 - [Feature] Expose term frequency in Painless script score context ([#9081](https://github.com/opensearch-project/OpenSearch/pull/9081))
+- Add support for reading partial files to HDFS repository ([#9513](https://github.com/opensearch-project/OpenSearch/issues/9513))

 ### Dependencies
 - Bump `org.apache.logging.log4j:log4j-core` from 2.17.1 to 2.20.0 ([#8307](https://github.com/opensearch-project/OpenSearch/pull/8307))

diff --git a/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsBlobContainer.java b/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsBlobContainer.java
index dcbd52d311230..669190f4e2490 100644
--- a/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsBlobContainer.java
+++ b/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsBlobContainer.java
@@ -32,6 +32,7 @@
 package org.opensearch.repositories.hdfs;

 import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.FileStatus;
@@ -46,6 +47,7 @@
 import org.opensearch.common.blobstore.fs.FsBlobContainer;
 import org.opensearch.common.blobstore.support.AbstractBlobContainer;
 import org.opensearch.common.blobstore.support.PlainBlobMetadata;
+import org.opensearch.common.io.Streams;
 import org.opensearch.repositories.hdfs.HdfsBlobStore.Operation;

 import java.io.FileNotFoundException;
@@ -125,8 +127,23 @@ public InputStream readBlob(String blobName) throws IOException {
     }

     @Override
-    public InputStream readBlob(String blobName, long position, long length) {
-        throw new UnsupportedOperationException();
+    public InputStream readBlob(String blobName, long position, long length) throws IOException {
+        return store.execute(fileContext -> {
+            final FSDataInputStream stream;
+            try {
+                stream = fileContext.open(new Path(path, blobName), bufferSize);
+            } catch (FileNotFoundException fnfe) {
+                throw new NoSuchFileException("[" + blobName + "] blob not found");
+            }
+            // Seek to the desired start position, closing the stream if any error occurs
+            try {
+                stream.seek(position);
+            } catch (Exception e) {
+                stream.close();
+                throw e;
+            }
+            return Streams.limitStream(new HDFSPrivilegedInputSteam(stream, securityContext), length);
+        });
     }

     @Override
diff --git a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsBlobStoreRepositoryTests.java b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsBlobStoreRepositoryTests.java
index 6ff18b20036a8..0df39636b8ffa 100644
--- a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsBlobStoreRepositoryTests.java
+++ b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsBlobStoreRepositoryTests.java
@@ -66,8 +66,4 @@ protected Settings repositorySettings() {
     protected Collection<Class<? extends Plugin>> nodePlugins() {
         return Collections.singletonList(HdfsPlugin.class);
     }
-
-    @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9513")
-    @Override
-    public void testReadRange() {}
 }
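The readBlob(blobName, position, length) implementation above follows the standard recipe for retrofitting range reads onto a seekable stream: open, seek to the offset (closing the stream if the seek throws, since an HDFS seek past end of file fails), then cap how many bytes the caller can consume. A generic sketch of the capping wrapper in plain java.io, standing in here for OpenSearch's Streams.limitStream:

    import java.io.IOException;
    import java.io.InputStream;

    final class LimitedInputStream extends InputStream {
        private final InputStream in;
        private long remaining;

        LimitedInputStream(InputStream in, long limit) {
            this.in = in;
            this.remaining = limit;
        }

        @Override
        public int read() throws IOException {
            if (remaining <= 0) {
                return -1;                 // end of the requested range
            }
            int b = in.read();
            if (b >= 0) {
                remaining--;
            }
            return b;
        }

        @Override
        public int read(byte[] buf, int off, int len) throws IOException {
            if (remaining <= 0) {
                return -1;
            }
            int n = in.read(buf, off, (int) Math.min(len, remaining));
            if (n > 0) {
                remaining -= n;
            }
            return n;
        }

        @Override
        public void close() throws IOException {
            in.close();                    // always release the underlying stream
        }
    }

The close-on-failed-seek detail matters: without the try/catch around seek, a seek past the blob's end would leak the opened HDFS stream.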
From a4024e78de06673b22281b09fec4afbf892d961c Mon Sep 17 00:00:00 2001
From: Navneet Verma
Date: Thu, 24 Aug 2023 12:01:56 -0700
Subject: [PATCH 21/30] Removing the vec file extension from
 INDEX_STORE_HYBRID_NIO_EXTENSIONS, to ensure no performance degradation for
 vector search via Lucene Engine. (#9528)

Signed-off-by: Navneet Verma
---
 CHANGELOG.md                                               | 1 +
 server/src/main/java/org/opensearch/index/IndexModule.java | 1 -
 2 files changed, 1 insertion(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index b0e902b1b97fa..11524d3d66161 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -154,6 +154,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Rethrow OpenSearch exception for non-concurrent path while using concurrent search ([#9177](https://github.com/opensearch-project/OpenSearch/pull/9177))
 - Improve performance of encoding composite keys in multi-term aggregations ([#9412](https://github.com/opensearch-project/OpenSearch/pull/9412))
 - Fix sort related ITs for concurrent search ([#9466](https://github.com/opensearch-project/OpenSearch/pull/9466))
+- Removing the vec file extension from INDEX_STORE_HYBRID_NIO_EXTENSIONS, to ensure no performance degradation for vector search via Lucene Engine ([#9528](https://github.com/opensearch-project/OpenSearch/pull/9528))

 ### Deprecated

diff --git a/server/src/main/java/org/opensearch/index/IndexModule.java b/server/src/main/java/org/opensearch/index/IndexModule.java
index b4f0e474430f2..131e2a867ac8b 100644
--- a/server/src/main/java/org/opensearch/index/IndexModule.java
+++ b/server/src/main/java/org/opensearch/index/IndexModule.java
@@ -223,7 +223,6 @@ public Iterator<Setting<?>> settings() {
             "tvd",
             "liv",
             "dii",
-            "vec",
             "vem"
         ),
         Function.identity(),
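For context on why dropping "vec" helps: in the hybrid store, files whose extensions appear in INDEX_STORE_HYBRID_NIO_EXTENSIONS are served through buffered NIO reads while everything else is memory-mapped, and Lucene vector search depends on fast random access to .vec data. A simplified, assumption-laden sketch of that routing (illustrative only; the real logic lives in OpenSearch's FsDirectoryFactory and is not shaped like this):

    import java.util.Set;

    final class HybridReadRouter {
        // Extensions kept on buffered NIO reads; everything else is mmap'ed.
        private final Set<String> nioExtensions;

        HybridReadRouter(Set<String> nioExtensions) {
            this.nioExtensions = Set.copyOf(nioExtensions);
        }

        boolean useNio(String fileName) {
            int dot = fileName.lastIndexOf('.');
            String ext = dot < 0 ? "" : fileName.substring(dot + 1);
            return nioExtensions.contains(ext);
        }
    }

With "vec" removed from the set, useNio("_0.vec") returns false, so vector data files fall through to the memory-mapped path.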
From c90b6eac285055959d419ae67c369421355e78f8 Mon Sep 17 00:00:00 2001
From: Shourya Dutta Biswas <114977491+shourya035@users.noreply.github.com>
Date: Fri, 25 Aug 2023 12:22:53 +0530
Subject: [PATCH 22/30] [Remote Store] Add total upload and download time from
 remote store to nodes stats (#9454)

---------

Signed-off-by: Shourya Dutta Biswas <114977491+shourya035@users.noreply.github.com>
---
 CHANGELOG.md                                  |   2 +-
 .../indices/stats/IndexStatsIT.java           |   2 +
 .../RemoteSegmentStatsFromNodesStatsIT.java   |  67 ++++++----
 .../index/remote/RemoteSegmentStats.java      |  41 +++++-
 .../remote/RemoteSegmentTransferTracker.java  |  49 +++++--
 .../shard/RemoteStoreRefreshListener.java     |  56 +++++---
 .../store/DirectoryFileTransferTracker.java   | 118 +++++++++++-------
 .../org/opensearch/index/store/Store.java     |   8 +-
 .../cluster/node/stats/NodeStatsTests.java    |   4 +
 .../stats/RemoteStoreStatsTestHelper.java     |   7 +-
 .../RemoteSegmentTransferTrackerTests.java    |  50 ++++++--
 .../RemoteStorePressureServiceTests.java      |   2 +-
 12 files changed, 285 insertions(+), 121 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 11524d3d66161..99b8120ee93df 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -48,7 +48,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Change http code on create index API with bad input raising NotXContentException from 500 to 400 ([#4773](https://github.com/opensearch-project/OpenSearch/pull/4773))
 - Improve summary error message for invalid setting updates ([#4792](https://github.com/opensearch-project/OpenSearch/pull/4792))
 - [Remote Store] Add Segment download stats to remotestore stats API ([#8718](https://github.com/opensearch-project/OpenSearch/pull/8718))
-- [Remote Store] Add remote segment transfer stats on NodesStats API ([#9168](https://github.com/opensearch-project/OpenSearch/pull/9168) [#9393](https://github.com/opensearch-project/OpenSearch/pull/9393))
+- [Remote Store] Add remote segment transfer stats on NodesStats API ([#9168](https://github.com/opensearch-project/OpenSearch/pull/9168) [#9393](https://github.com/opensearch-project/OpenSearch/pull/9393) [#9454](https://github.com/opensearch-project/OpenSearch/pull/9454))
 - Return 409 Conflict HTTP status instead of 503 on failure to concurrently execute snapshots ([#8986](https://github.com/opensearch-project/OpenSearch/pull/5855))

 ### Deprecated

diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java
index af5191d7d2039..1a131a2a7eb3d 100644
--- a/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java
@@ -1457,6 +1457,8 @@ private void assertZeroRemoteSegmentStats(RemoteSegmentStats remoteSegmentStats)
         assertEquals(0, remoteSegmentStats.getTotalRefreshBytesLag());
         assertEquals(0, remoteSegmentStats.getMaxRefreshBytesLag());
         assertEquals(0, remoteSegmentStats.getMaxRefreshTimeLag());
+        assertEquals(0, remoteSegmentStats.getTotalUploadTime());
+        assertEquals(0, remoteSegmentStats.getTotalDownloadTime());
     }

     /**
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteSegmentStatsFromNodesStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteSegmentStatsFromNodesStatsIT.java
index 19ad43b503ab7..c2e79ea2de5ef 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteSegmentStatsFromNodesStatsIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteSegmentStatsFromNodesStatsIT.java
@@ -67,7 +67,8 @@ public void testNodesStatsParityWithOnlyPrimaryShards() {
         indexSingleDoc(secondIndex, true);

         long cumulativeUploadsSucceeded = 0, cumulativeUploadsStarted = 0, cumulativeUploadsFailed = 0;
-        long total_bytes_lag = 0, max_bytes_lag = 0, max_time_lag = 0;
+        long totalBytesLag = 0, maxBytesLag = 0, maxTimeLag = 0;
+        long totalUploadTime = 0;
         // Fetch upload stats
         RemoteStoreStatsResponse remoteStoreStatsFirstIndex = client(randomDataNode).admin()
             .cluster()
@@ -77,9 +78,10 @@ public void testNodesStatsParityWithOnlyPrimaryShards() {
         cumulativeUploadsSucceeded += remoteStoreStatsFirstIndex.getRemoteStoreStats()[0].getSegmentStats().uploadBytesSucceeded;
         cumulativeUploadsStarted += remoteStoreStatsFirstIndex.getRemoteStoreStats()[0].getSegmentStats().uploadBytesStarted;
         cumulativeUploadsFailed += remoteStoreStatsFirstIndex.getRemoteStoreStats()[0].getSegmentStats().uploadBytesFailed;
-        total_bytes_lag += remoteStoreStatsFirstIndex.getRemoteStoreStats()[0].getSegmentStats().bytesLag;
-        max_bytes_lag = Math.max(max_bytes_lag, remoteStoreStatsFirstIndex.getRemoteStoreStats()[0].getSegmentStats().bytesLag);
-        max_time_lag = Math.max(max_time_lag, remoteStoreStatsFirstIndex.getRemoteStoreStats()[0].getSegmentStats().refreshTimeLagMs);
+        totalBytesLag += remoteStoreStatsFirstIndex.getRemoteStoreStats()[0].getSegmentStats().bytesLag;
+        maxBytesLag = Math.max(maxBytesLag, remoteStoreStatsFirstIndex.getRemoteStoreStats()[0].getSegmentStats().bytesLag);
+        maxTimeLag = Math.max(maxTimeLag, remoteStoreStatsFirstIndex.getRemoteStoreStats()[0].getSegmentStats().refreshTimeLagMs);
+        totalUploadTime += remoteStoreStatsFirstIndex.getRemoteStoreStats()[0].getSegmentStats().totalUploadTimeInMs;

         RemoteStoreStatsResponse remoteStoreStatsSecondIndex = client(randomDataNode).admin()
             .cluster()
@@ -90,9 +92,10 @@ public void testNodesStatsParityWithOnlyPrimaryShards() {
         cumulativeUploadsSucceeded += remoteStoreStatsSecondIndex.getRemoteStoreStats()[0].getSegmentStats().uploadBytesSucceeded;
         cumulativeUploadsStarted += remoteStoreStatsSecondIndex.getRemoteStoreStats()[0].getSegmentStats().uploadBytesStarted;
         cumulativeUploadsFailed += remoteStoreStatsSecondIndex.getRemoteStoreStats()[0].getSegmentStats().uploadBytesFailed;
-        total_bytes_lag += remoteStoreStatsSecondIndex.getRemoteStoreStats()[0].getSegmentStats().bytesLag;
-        max_bytes_lag = Math.max(max_bytes_lag, remoteStoreStatsSecondIndex.getRemoteStoreStats()[0].getSegmentStats().bytesLag);
-        max_time_lag = Math.max(max_time_lag, remoteStoreStatsSecondIndex.getRemoteStoreStats()[0].getSegmentStats().refreshTimeLagMs);
+        totalBytesLag += remoteStoreStatsSecondIndex.getRemoteStoreStats()[0].getSegmentStats().bytesLag;
+        maxBytesLag = Math.max(maxBytesLag, remoteStoreStatsSecondIndex.getRemoteStoreStats()[0].getSegmentStats().bytesLag);
+        maxTimeLag = Math.max(maxTimeLag, remoteStoreStatsSecondIndex.getRemoteStoreStats()[0].getSegmentStats().refreshTimeLagMs);
+        totalUploadTime += remoteStoreStatsSecondIndex.getRemoteStoreStats()[0].getSegmentStats().totalUploadTimeInMs;

         // Fetch nodes stats
         NodesStatsResponse nodesStatsResponse = client().admin()
@@ -101,12 +104,13 @@ public void testNodesStatsParityWithOnlyPrimaryShards() {
             .setIndices(new CommonStatsFlags().set(CommonStatsFlags.Flag.Segments, true))
             .get();
         RemoteSegmentStats remoteSegmentStats = nodesStatsResponse.getNodes().get(0).getIndices().getSegments().getRemoteSegmentStats();
-        assertEquals(cumulativeUploadsSucceeded, remoteSegmentStats.getUploadBytesSucceeded());
-        assertEquals(cumulativeUploadsStarted, remoteSegmentStats.getUploadBytesStarted());
+        assertTrue(cumulativeUploadsSucceeded > 0 && cumulativeUploadsSucceeded == remoteSegmentStats.getUploadBytesSucceeded());
+        assertTrue(cumulativeUploadsStarted > 0 && cumulativeUploadsStarted == remoteSegmentStats.getUploadBytesStarted());
         assertEquals(cumulativeUploadsFailed, remoteSegmentStats.getUploadBytesFailed());
-        assertEquals(total_bytes_lag, remoteSegmentStats.getTotalRefreshBytesLag());
-        assertEquals(max_bytes_lag, remoteSegmentStats.getMaxRefreshBytesLag());
-        assertEquals(max_time_lag, remoteSegmentStats.getMaxRefreshTimeLag());
+        assertEquals(totalBytesLag, remoteSegmentStats.getTotalRefreshBytesLag());
+        assertEquals(maxBytesLag, remoteSegmentStats.getMaxRefreshBytesLag());
+        assertEquals(maxTimeLag, remoteSegmentStats.getMaxRefreshTimeLag());
+        assertTrue(totalUploadTime > 0 && totalUploadTime == remoteSegmentStats.getTotalUploadTime());
     }

     /**
@@ -180,13 +184,16 @@ private void assertZeroRemoteSegmentStats(RemoteSegmentStats remoteSegmentStats)
         assertEquals(0, remoteSegmentStats.getTotalRefreshBytesLag());
         assertEquals(0, remoteSegmentStats.getMaxRefreshBytesLag());
         assertEquals(0, remoteSegmentStats.getMaxRefreshTimeLag());
+        assertEquals(0, remoteSegmentStats.getTotalUploadTime());
+        assertEquals(0, remoteSegmentStats.getTotalDownloadTime());
     }

     private static void assertNodeStatsParityAcrossNodes(String firstIndex, String secondIndex) {
         for (String dataNode : internalCluster().getDataNodeNames()) {
             long cumulativeUploadsSucceeded = 0, cumulativeUploadsStarted = 0, cumulativeUploadsFailed = 0;
             long cumulativeDownloadsSucceeded = 0, cumulativeDownloadsStarted = 0, cumulativeDownloadsFailed = 0;
-            long total_bytes_lag = 0, max_bytes_lag = 0, max_time_lag = 0;
+            long totalBytesLag = 0, maxBytesLag = 0, maxTimeLag = 0;
+            long totalUploadTime = 0, totalDownloadTime = 0;
             // Fetch upload stats
             RemoteStoreStatsResponse remoteStoreStatsFirstIndex = client(dataNode).admin()
                 .cluster()
@@ -202,9 +209,12 @@ private static void assertNodeStatsParityAcrossNodes(String firstIndex, String s
                 .getSegmentStats().directoryFileTransferTrackerStats.transferredBytesStarted;
             cumulativeDownloadsFailed += remoteStoreStatsFirstIndex.getRemoteStoreStats()[0]
                 .getSegmentStats().directoryFileTransferTrackerStats.transferredBytesFailed;
-            total_bytes_lag += remoteStoreStatsFirstIndex.getRemoteStoreStats()[0].getSegmentStats().bytesLag;
-            max_bytes_lag = Math.max(max_bytes_lag, remoteStoreStatsFirstIndex.getRemoteStoreStats()[0].getSegmentStats().bytesLag);
-            max_time_lag = Math.max(max_time_lag, remoteStoreStatsFirstIndex.getRemoteStoreStats()[0].getSegmentStats().refreshTimeLagMs);
+            totalBytesLag += remoteStoreStatsFirstIndex.getRemoteStoreStats()[0].getSegmentStats().bytesLag;
+            maxBytesLag = Math.max(maxBytesLag, remoteStoreStatsFirstIndex.getRemoteStoreStats()[0].getSegmentStats().bytesLag);
+            maxTimeLag = Math.max(maxTimeLag, remoteStoreStatsFirstIndex.getRemoteStoreStats()[0].getSegmentStats().refreshTimeLagMs);
+            totalUploadTime += remoteStoreStatsFirstIndex.getRemoteStoreStats()[0].getSegmentStats().totalUploadTimeInMs;
+            totalDownloadTime += remoteStoreStatsFirstIndex.getRemoteStoreStats()[0]
+                .getSegmentStats().directoryFileTransferTrackerStats.totalTransferTimeInMs;

             RemoteStoreStatsResponse remoteStoreStatsSecondIndex = client(dataNode).admin()
                 .cluster()
@@ -220,9 +230,12 @@ private static void assertNodeStatsParityAcrossNodes(String firstIndex, String s
                 .getSegmentStats().directoryFileTransferTrackerStats.transferredBytesStarted;
             cumulativeDownloadsFailed += remoteStoreStatsSecondIndex.getRemoteStoreStats()[0]
                 .getSegmentStats().directoryFileTransferTrackerStats.transferredBytesFailed;
-            total_bytes_lag += remoteStoreStatsSecondIndex.getRemoteStoreStats()[0].getSegmentStats().bytesLag;
-            max_bytes_lag = Math.max(max_bytes_lag, remoteStoreStatsSecondIndex.getRemoteStoreStats()[0].getSegmentStats().bytesLag);
-            max_time_lag = Math.max(max_time_lag, remoteStoreStatsSecondIndex.getRemoteStoreStats()[0].getSegmentStats().refreshTimeLagMs);
+            totalBytesLag += remoteStoreStatsSecondIndex.getRemoteStoreStats()[0].getSegmentStats().bytesLag;
+            maxBytesLag = Math.max(maxBytesLag, remoteStoreStatsSecondIndex.getRemoteStoreStats()[0].getSegmentStats().bytesLag);
+            maxTimeLag = Math.max(maxTimeLag, remoteStoreStatsSecondIndex.getRemoteStoreStats()[0].getSegmentStats().refreshTimeLagMs);
+            totalUploadTime += remoteStoreStatsSecondIndex.getRemoteStoreStats()[0].getSegmentStats().totalUploadTimeInMs;
+            totalDownloadTime += remoteStoreStatsSecondIndex.getRemoteStoreStats()[0]
+                .getSegmentStats().directoryFileTransferTrackerStats.totalTransferTimeInMs;

             // Fetch nodes stats
             NodesStatsResponse nodesStatsResponse = client().admin()
@@ -237,9 +250,19 @@ private static void assertNodeStatsParityAcrossNodes(String firstIndex, String s
             assertEquals(cumulativeDownloadsSucceeded, remoteSegmentStats.getDownloadBytesSucceeded());
             assertEquals(cumulativeDownloadsStarted, remoteSegmentStats.getDownloadBytesStarted());
             assertEquals(cumulativeDownloadsFailed, remoteSegmentStats.getDownloadBytesFailed());
-            assertEquals(total_bytes_lag, remoteSegmentStats.getTotalRefreshBytesLag());
-            assertEquals(max_bytes_lag, remoteSegmentStats.getMaxRefreshBytesLag());
-            assertEquals(max_time_lag, remoteSegmentStats.getMaxRefreshTimeLag());
+            assertEquals(totalBytesLag, remoteSegmentStats.getTotalRefreshBytesLag());
+            assertEquals(maxBytesLag, remoteSegmentStats.getMaxRefreshBytesLag());
+            assertEquals(maxTimeLag, remoteSegmentStats.getMaxRefreshTimeLag());
+            // Ensure that total upload time has non-zero value if there has been segments uploaded from the node
+            if (cumulativeUploadsStarted > 0) {
+                assertTrue(totalUploadTime > 0);
+            }
+            assertEquals(totalUploadTime, remoteSegmentStats.getTotalUploadTime());
+            // Ensure that total download time has non-zero value if there has been segments downloaded to the node
+            if (cumulativeDownloadsStarted > 0) {
+                assertTrue(totalDownloadTime > 0);
+            }
+            assertEquals(totalDownloadTime, remoteSegmentStats.getTotalDownloadTime());
         }
     }
 }
diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteSegmentStats.java b/server/src/main/java/org/opensearch/index/remote/RemoteSegmentStats.java
index 0ff61d49c00f8..ace026e28ab7c 100644
--- a/server/src/main/java/org/opensearch/index/remote/RemoteSegmentStats.java
+++ b/server/src/main/java/org/opensearch/index/remote/RemoteSegmentStats.java
@@ -67,6 +67,14 @@ public class RemoteSegmentStats implements Writeable, ToXContentFragment {
      * Used to check for data freshness in the remote store
      */
     private long totalRefreshBytesLag;
+    /**
+     * Total time spent in uploading segments to remote store
+     */
+    private long totalUploadTime;
+    /**
+     * Total time spent in downloading segments from remote store
+     */
+    private long totalDownloadTime;

     public RemoteSegmentStats() {}

@@ -89,8 +97,10 @@ public RemoteSegmentStats(StreamInput in) throws IOException {
             This would have to be removed after the new field addition PRs are also backported to 2.x.
             If possible we would need to ensure that all field addition PRs are backported at once
          */
-        if (in.getVersion().onOrAfter(Version.V_3_0_0)) {
+        if (in.getVersion().onOrAfter(Version.CURRENT)) {
             totalRefreshBytesLag = in.readLong();
+            totalUploadTime = in.readLong();
+            totalDownloadTime = in.readLong();
         }
     }

@@ -115,9 +125,12 @@ public RemoteSegmentStats(RemoteSegmentTransferTracker.Stats trackerStats) {
         // Aggregations would be performed on the add method
         this.maxRefreshBytesLag = trackerStats.bytesLag;
         this.totalRefreshBytesLag = trackerStats.bytesLag;
+        this.totalUploadTime = trackerStats.totalUploadTimeInMs;
+        this.totalDownloadTime = trackerStats.directoryFileTransferTrackerStats.totalTransferTimeInMs;
     }

     // Getter and setters. All are visible for testing
+    // Setters are only used for testing
     public long getUploadBytesStarted() {
         return uploadBytesStarted;
     }
@@ -190,6 +203,22 @@ public void addTotalRefreshBytesLag(long totalRefreshBytesLag) {
         this.totalRefreshBytesLag += totalRefreshBytesLag;
     }

+    public long getTotalUploadTime() {
+        return totalUploadTime;
+    }
+
+    public void addTotalUploadTime(long totalUploadTime) {
+        this.totalUploadTime += totalUploadTime;
+    }
+
+    public long getTotalDownloadTime() {
+        return totalDownloadTime;
+    }
+
+    public void addTotalDownloadTime(long totalDownloadTime) {
+        this.totalDownloadTime += totalDownloadTime;
+    }
+
     /**
      * Adds existing stats. Used for stats roll-ups at index or node level
      *
@@ -206,6 +235,8 @@ public void add(RemoteSegmentStats existingStats) {
             this.maxRefreshTimeLag = Math.max(this.maxRefreshTimeLag, existingStats.getMaxRefreshTimeLag());
             this.maxRefreshBytesLag = Math.max(this.maxRefreshBytesLag, existingStats.getMaxRefreshBytesLag());
             this.totalRefreshBytesLag += existingStats.getTotalRefreshBytesLag();
+            this.totalUploadTime += existingStats.getTotalUploadTime();
+            this.totalDownloadTime += existingStats.getTotalDownloadTime();
         }
     }

@@ -229,8 +260,10 @@ public void writeTo(StreamOutput out) throws IOException {
             This would have to be removed after the new field addition PRs are also backported to 2.x.
             If possible we would need to ensure that all field addition PRs are backported at once
          */
-        if (out.getVersion().onOrAfter(Version.V_3_0_0)) {
+        if (out.getVersion().onOrAfter(Version.CURRENT)) {
             out.writeLong(totalRefreshBytesLag);
+            out.writeLong(totalUploadTime);
+            out.writeLong(totalDownloadTime);
         }
     }

@@ -258,6 +291,7 @@ private void buildUploadStats(XContentBuilder builder) throws IOException {
         builder.humanReadableField(Fields.MAX_BYTES, Fields.MAX, new ByteSizeValue(maxRefreshBytesLag));
         builder.endObject();
         builder.humanReadableField(Fields.MAX_REFRESH_TIME_LAG_IN_MILLIS, Fields.MAX_REFRESH_TIME_LAG, new TimeValue(maxRefreshTimeLag));
+        builder.humanReadableField(Fields.TOTAL_TIME_SPENT_IN_MILLIS, Fields.TOTAL_TIME_SPENT, new TimeValue(totalUploadTime));
     }

     private void buildDownloadStats(XContentBuilder builder) throws IOException {
@@ -266,6 +300,7 @@ private void buildDownloadStats(XContentBuilder builder) throws IOException {
         builder.humanReadableField(Fields.SUCCEEDED_BYTES, Fields.SUCCEEDED, new ByteSizeValue(downloadBytesSucceeded));
         builder.humanReadableField(Fields.FAILED_BYTES, Fields.FAILED, new ByteSizeValue(downloadBytesFailed));
         builder.endObject();
+        builder.humanReadableField(Fields.TOTAL_TIME_SPENT_IN_MILLIS, Fields.TOTAL_TIME_SPENT, new TimeValue(totalDownloadTime));
     }

     static final class Fields {
@@ -287,5 +322,7 @@ static final class Fields {
         static final String TOTAL_BYTES = "total_bytes";
         static final String MAX = "max";
         static final String MAX_BYTES = "max_bytes";
+        static final String TOTAL_TIME_SPENT = "total_time_spent";
+        static final String TOTAL_TIME_SPENT_IN_MILLIS = "total_time_spent_in_millis";
     }
 }
diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteSegmentTransferTracker.java b/server/src/main/java/org/opensearch/index/remote/RemoteSegmentTransferTracker.java
index 1531f74597a03..95902fd375145 100644
--- a/server/src/main/java/org/opensearch/index/remote/RemoteSegmentTransferTracker.java
+++ b/server/src/main/java/org/opensearch/index/remote/RemoteSegmentTransferTracker.java
@@ -96,17 +96,17 @@ public class RemoteSegmentTransferTracker {
     /**
      * Cumulative sum of size in bytes of segment files for which upload has started during remote refresh.
      */
-    private volatile long uploadBytesStarted;
+    private final AtomicLong uploadBytesStarted = new AtomicLong();

     /**
      * Cumulative sum of size in bytes of segment files for which upload has failed during remote refresh.
      */
-    private volatile long uploadBytesFailed;
+    private final AtomicLong uploadBytesFailed = new AtomicLong();

     /**
      * Cumulative sum of size in bytes of segment files for which upload has succeeded during remote refresh.
      */
-    private volatile long uploadBytesSucceeded;
+    private final AtomicLong uploadBytesSucceeded = new AtomicLong();

     /**
      * Cumulative sum of count of remote refreshes that have started.
@@ -123,6 +123,11 @@ public class RemoteSegmentTransferTracker { */ private volatile long totalUploadsSucceeded; + /** + * Cumulative sum of time taken in remote refresh (in milliseconds) [Tracked per file] + */ + private AtomicLong totalUploadTimeInMs = new AtomicLong(); + /** * Cumulative sum of rejection counts for this shard. */ @@ -316,31 +321,31 @@ public long getBytesLag() { } public long getUploadBytesStarted() { - return uploadBytesStarted; + return uploadBytesStarted.get(); } public void addUploadBytesStarted(long size) { - uploadBytesStarted += size; + uploadBytesStarted.getAndAdd(size); } public long getUploadBytesFailed() { - return uploadBytesFailed; + return uploadBytesFailed.get(); } public void addUploadBytesFailed(long size) { - uploadBytesFailed += size; + uploadBytesFailed.getAndAdd(size); } public long getUploadBytesSucceeded() { - return uploadBytesSucceeded; + return uploadBytesSucceeded.get(); } public void addUploadBytesSucceeded(long size) { - uploadBytesSucceeded += size; + uploadBytesSucceeded.getAndAdd(size); } public long getInflightUploadBytes() { - return uploadBytesStarted - uploadBytesFailed - uploadBytesSucceeded; + return uploadBytesStarted.get() - uploadBytesFailed.get() - uploadBytesSucceeded.get(); } public long getTotalUploadsStarted() { @@ -508,7 +513,7 @@ boolean isUploadTimeMsAverageReady() { return uploadTimeMsMovingAverageReference.get().getAverage(); } - public void addUploadTimeMs(long timeMs) { + public void addTimeForCompletedUploadSync(long timeMs) { synchronized (uploadTimeMsMutex) { this.uploadTimeMsMovingAverageReference.get().record(timeMs); } @@ -525,6 +530,14 @@ void updateUploadTimeMsMovingAverageWindowSize(int updatedSize) { } } + public void addTotalUploadTimeInMs(long fileUploadTimeInMs) { + this.totalUploadTimeInMs.addAndGet(fileUploadTimeInMs); + } + + public long getTotalUploadTimeInMs() { + return totalUploadTimeInMs.get(); + } + public DirectoryFileTransferTracker getDirectoryFileTransferTracker() { return directoryFileTransferTracker; } @@ -537,9 +550,9 @@ public RemoteSegmentTransferTracker.Stats stats() { timeMsLag, localRefreshSeqNo, remoteRefreshSeqNo, - uploadBytesStarted, - uploadBytesSucceeded, - uploadBytesFailed, + uploadBytesStarted.get(), + uploadBytesSucceeded.get(), + uploadBytesFailed.get(), totalUploadsStarted, totalUploadsSucceeded, totalUploadsFailed, @@ -550,6 +563,7 @@ public RemoteSegmentTransferTracker.Stats stats() { uploadBytesPerSecMovingAverageReference.get().getAverage(), uploadTimeMsMovingAverageReference.get().getAverage(), getBytesLag(), + totalUploadTimeInMs.get(), directoryFileTransferTracker.stats() ); } @@ -578,6 +592,7 @@ public static class Stats implements Writeable { public final long lastSuccessfulRemoteRefreshBytes; public final double uploadBytesMovingAverage; public final double uploadBytesPerSecMovingAverage; + public final long totalUploadTimeInMs; public final double uploadTimeMovingAverage; public final long bytesLag; public final DirectoryFileTransferTracker.Stats directoryFileTransferTrackerStats; @@ -602,6 +617,7 @@ public Stats( double uploadBytesPerSecMovingAverage, double uploadTimeMovingAverage, long bytesLag, + long totalUploadTimeInMs, DirectoryFileTransferTracker.Stats directoryFileTransferTrackerStats ) { this.shardId = shardId; @@ -623,6 +639,7 @@ public Stats( this.uploadBytesPerSecMovingAverage = uploadBytesPerSecMovingAverage; this.uploadTimeMovingAverage = uploadTimeMovingAverage; this.bytesLag = bytesLag; + this.totalUploadTimeInMs = totalUploadTimeInMs; 
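        // Editor's note (not part of the patch): Stats is a Writeable, and the wire format is
        // positional. totalUploadTimeInMs must keep the same relative position here, in the
        // Stats(StreamInput) constructor, and in writeTo(StreamOutput) below, or reader and
        // writer desynchronize on the stream.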
this.directoryFileTransferTrackerStats = directoryFileTransferTrackerStats; } @@ -647,6 +664,7 @@ public Stats(StreamInput in) throws IOException { this.uploadBytesPerSecMovingAverage = in.readDouble(); this.uploadTimeMovingAverage = in.readDouble(); this.bytesLag = in.readLong(); + this.totalUploadTimeInMs = in.readLong(); this.directoryFileTransferTrackerStats = in.readOptionalWriteable(DirectoryFileTransferTracker.Stats::new); } catch (IOException e) { throw e; @@ -674,6 +692,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeDouble(uploadBytesPerSecMovingAverage); out.writeDouble(uploadTimeMovingAverage); out.writeLong(bytesLag); + out.writeLong(totalUploadTimeInMs); out.writeOptionalWriteable(directoryFileTransferTrackerStats); } @@ -702,6 +721,7 @@ public boolean equals(Object obj) { && Double.compare(this.uploadBytesPerSecMovingAverage, other.uploadBytesPerSecMovingAverage) == 0 && Double.compare(this.uploadTimeMovingAverage, other.uploadTimeMovingAverage) == 0 && this.bytesLag == other.bytesLag + && this.totalUploadTimeInMs == other.totalUploadTimeInMs && this.directoryFileTransferTrackerStats.equals(other.directoryFileTransferTrackerStats); } @@ -727,6 +747,7 @@ public int hashCode() { uploadBytesPerSecMovingAverage, uploadTimeMovingAverage, bytesLag, + totalUploadTimeInMs, directoryFileTransferTrackerStats ); } diff --git a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java index 2f0d11fb6a8b3..e8a9ec866ac01 100644 --- a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java +++ b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java @@ -89,7 +89,6 @@ public final class RemoteStoreRefreshListener extends CloseableRetryableRefreshL private long primaryTerm; private volatile Iterator backoffDelayIterator; private final SegmentReplicationCheckpointPublisher checkpointPublisher; - private final UploadListener statsListener; public RemoteStoreRefreshListener( IndexShard indexShard, @@ -117,26 +116,6 @@ public RemoteStoreRefreshListener( this.segmentTracker = segmentTracker; resetBackOffDelayIterator(); this.checkpointPublisher = checkpointPublisher; - this.statsListener = new UploadListener() { - @Override - public void beforeUpload(String file) { - // Start tracking the upload bytes started - segmentTracker.addUploadBytesStarted(segmentTracker.getLatestLocalFileNameLengthMap().get(file)); - } - - @Override - public void onSuccess(String file) { - // Track upload success - segmentTracker.addUploadBytesSucceeded(segmentTracker.getLatestLocalFileNameLengthMap().get(file)); - segmentTracker.addToLatestUploadedFiles(file); - } - - @Override - public void onFailure(String file) { - // Track upload failure - segmentTracker.addUploadBytesFailed(segmentTracker.getLatestLocalFileNameLengthMap().get(file)); - } - }; } @Override @@ -373,6 +352,8 @@ private void uploadNewSegments(Collection localSegmentsPostRefresh, Acti GroupedActionListener batchUploadListener = new GroupedActionListener<>(mappedListener, filteredFiles.size()); for (String src : filteredFiles) { + // Initializing listener here to ensure that the stats increment operations are thread-safe + UploadListener statsListener = createUploadListener(); ActionListener aggregatedListener = ActionListener.wrap(resp -> { statsListener.onSuccess(src); batchUploadListener.onResponse(resp); @@ -443,12 +424,43 @@ private void 
updateFinalStatusInSegmentTracker(boolean uploadStatus, long bytesB
             segmentTracker.incrementTotalUploadsSucceeded();
             segmentTracker.addUploadBytes(bytesUploaded);
             segmentTracker.addUploadBytesPerSec((bytesUploaded * 1_000L) / Math.max(1, timeTakenInMS));
-            segmentTracker.addUploadTimeMs(timeTakenInMS);
+            segmentTracker.addTimeForCompletedUploadSync(timeTakenInMS);
         } else {
             segmentTracker.incrementTotalUploadsFailed();
         }
     }

+    /**
+     * Creates an {@link UploadListener} containing the stats population logic which would be triggered before and after segment upload events
+     */
+    private UploadListener createUploadListener() {
+        return new UploadListener() {
+            private long uploadStartTime = 0;
+
+            @Override
+            public void beforeUpload(String file) {
+                // Start tracking the upload bytes started
+                segmentTracker.addUploadBytesStarted(segmentTracker.getLatestLocalFileNameLengthMap().get(file));
+                uploadStartTime = System.currentTimeMillis();
+            }
+
+            @Override
+            public void onSuccess(String file) {
+                // Track upload success
+                segmentTracker.addUploadBytesSucceeded(segmentTracker.getLatestLocalFileNameLengthMap().get(file));
+                segmentTracker.addToLatestUploadedFiles(file);
+                segmentTracker.addTotalUploadTimeInMs(Math.max(1, System.currentTimeMillis() - uploadStartTime));
+            }
+
+            @Override
+            public void onFailure(String file) {
+                // Track upload failure
+                segmentTracker.addUploadBytesFailed(segmentTracker.getLatestLocalFileNameLengthMap().get(file));
+                segmentTracker.addTotalUploadTimeInMs(Math.max(1, System.currentTimeMillis() - uploadStartTime));
+            }
+        };
+    }
+
     @Override
     protected Logger getLogger() {
         return logger;
diff --git a/server/src/main/java/org/opensearch/index/store/DirectoryFileTransferTracker.java b/server/src/main/java/org/opensearch/index/store/DirectoryFileTransferTracker.java
index 5e12517becaf2..7ad48cb56a33b 100644
--- a/server/src/main/java/org/opensearch/index/store/DirectoryFileTransferTracker.java
+++ b/server/src/main/java/org/opensearch/index/store/DirectoryFileTransferTracker.java
@@ -8,6 +8,7 @@
 package org.opensearch.index.store;

+import org.apache.lucene.store.Directory;
 import org.opensearch.common.util.MovingAverage;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
@@ -15,128 +16,150 @@
 import java.io.IOException;
 import java.util.Objects;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicReference;

 /**
- * Tracks the amount of bytes transferred between two {@link org.apache.lucene.store.Directory} instances
+ * Tracks the amount of bytes transferred between two {@link Directory} instances
  *
  * @opensearch.internal
  */
 public class DirectoryFileTransferTracker {
     /**
-     * Cumulative size of files (in bytes) attempted to be transferred over from the source {@link org.apache.lucene.store.Directory}
+     * Cumulative size of files (in bytes) attempted to be transferred over from the source {@link Directory}
      */
-    private volatile long transferredBytesStarted;
+    private final AtomicLong transferredBytesStarted = new AtomicLong();

     /**
-     * Cumulative size of files (in bytes) successfully transferred over from the source {@link org.apache.lucene.store.Directory}
+     * Cumulative size of files (in bytes) that failed to transfer over from the source {@link Directory}
      */
-    private volatile long transferredBytesFailed;
+    private final AtomicLong transferredBytesFailed = new AtomicLong();

     /**
-     * Cumulative size of files (in bytes) failed in transfer over from the source {@link org.apache.lucene.store.Directory}
+     * Cumulative size of files (in bytes) successfully transferred over from the source {@link Directory}
      */
-    private volatile long transferredBytesSucceeded;
+    private final AtomicLong transferredBytesSucceeded = new AtomicLong();

     /**
-     * Time in milliseconds for the last successful transfer from the source {@link org.apache.lucene.store.Directory}
+     * Timestamp in milliseconds of the last successful transfer from the source {@link Directory}
      */
-    private volatile long lastTransferTimestampMs;
+    private final AtomicLong lastTransferTimestampMs = new AtomicLong();

     /**
-     * Provides moving average over the last N total size in bytes of files transferred from the source {@link org.apache.lucene.store.Directory}.
+     * Cumulative time in milliseconds spent in successful transfers from the source {@link Directory}
+     */
+    private final AtomicLong totalTransferTimeInMs = new AtomicLong();
+
+    /**
+     * Provides moving average over the last N total size in bytes of files transferred from the source {@link Directory}.
      * N is window size
      */
-    private volatile MovingAverage transferredBytesMovingAverageReference;
+    private final AtomicReference<MovingAverage> transferredBytesMovingAverageReference;

-    private volatile long lastSuccessfulTransferInBytes;
+    private final AtomicLong lastSuccessfulTransferInBytes = new AtomicLong();

     /**
-     * Provides moving average over the last N transfer speed (in bytes/s) of segment files transferred from the source {@link org.apache.lucene.store.Directory}.
+     * Provides moving average over the last N transfer speed (in bytes/s) of segment files transferred from the source {@link Directory}.
      * N is window size
      */
-    private volatile MovingAverage transferredBytesPerSecMovingAverageReference;
+    private final AtomicReference<MovingAverage> transferredBytesPerSecMovingAverageReference;

     private final int DIRECTORY_FILES_TRANSFER_DEFAULT_WINDOW_SIZE = 20;

+    // Getters and Setters, all are visible for testing
     public long getTransferredBytesStarted() {
-        return transferredBytesStarted;
+        return transferredBytesStarted.get();
     }

     public void addTransferredBytesStarted(long size) {
-        transferredBytesStarted += size;
+        transferredBytesStarted.getAndAdd(size);
     }

     public long getTransferredBytesFailed() {
-        return transferredBytesFailed;
+        return transferredBytesFailed.get();
     }

-    public void addTransferredBytesFailed(long size) {
-        transferredBytesFailed += size;
+    public void addTransferredBytesFailed(long size, long startTimeInMs) {
+        transferredBytesFailed.getAndAdd(size);
+        addTotalTransferTimeInMs(Math.max(1, System.currentTimeMillis() - startTimeInMs));
     }

     public long getTransferredBytesSucceeded() {
-        return transferredBytesSucceeded;
+        return transferredBytesSucceeded.get();
     }

     public void addTransferredBytesSucceeded(long size, long startTimeInMs) {
-        transferredBytesSucceeded += size;
-        updateLastSuccessfulTransferSize(size);
+        transferredBytesSucceeded.getAndAdd(size);
+        updateSuccessfulTransferSize(size);
         long currentTimeInMs = System.currentTimeMillis();
         updateLastTransferTimestampMs(currentTimeInMs);
         long timeTakenInMS = Math.max(1, currentTimeInMs - startTimeInMs);
+        addTotalTransferTimeInMs(timeTakenInMS);
         addTransferredBytesPerSec((size * 1_000L) / timeTakenInMS);
     }

     public boolean isTransferredBytesPerSecAverageReady() {
-        return transferredBytesPerSecMovingAverageReference.isReady();
+        return transferredBytesPerSecMovingAverageReference.get().isReady();
     }

     public double getTransferredBytesPerSecAverage() {
-        return transferredBytesPerSecMovingAverageReference.getAverage();
+
return transferredBytesPerSecMovingAverageReference.get().getAverage(); } - // Visible for testing public void addTransferredBytesPerSec(long bytesPerSec) { - this.transferredBytesPerSecMovingAverageReference.record(bytesPerSec); + this.transferredBytesPerSecMovingAverageReference.get().record(bytesPerSec); } public boolean isTransferredBytesAverageReady() { - return transferredBytesMovingAverageReference.isReady(); + return transferredBytesMovingAverageReference.get().isReady(); } public double getTransferredBytesAverage() { - return transferredBytesMovingAverageReference.getAverage(); + return transferredBytesMovingAverageReference.get().getAverage(); + } + + public void updateLastSuccessfulTransferInBytes(long size) { + lastSuccessfulTransferInBytes.set(size); } - // Visible for testing - public void updateLastSuccessfulTransferSize(long size) { - lastSuccessfulTransferInBytes = size; - this.transferredBytesMovingAverageReference.record(size); + public void updateSuccessfulTransferSize(long size) { + updateLastSuccessfulTransferInBytes(size); + this.transferredBytesMovingAverageReference.get().record(size); } public long getLastTransferTimestampMs() { - return lastTransferTimestampMs; + return lastTransferTimestampMs.get(); } - // Visible for testing public void updateLastTransferTimestampMs(long downloadTimestampInMs) { - this.lastTransferTimestampMs = downloadTimestampInMs; + this.lastTransferTimestampMs.set(downloadTimestampInMs); + } + + public void addTotalTransferTimeInMs(long totalTransferTimeInMs) { + this.totalTransferTimeInMs.addAndGet(totalTransferTimeInMs); + } + + public long getTotalTransferTimeInMs() { + return totalTransferTimeInMs.get(); } public DirectoryFileTransferTracker() { - transferredBytesMovingAverageReference = new MovingAverage(DIRECTORY_FILES_TRANSFER_DEFAULT_WINDOW_SIZE); - transferredBytesPerSecMovingAverageReference = new MovingAverage(DIRECTORY_FILES_TRANSFER_DEFAULT_WINDOW_SIZE); + transferredBytesMovingAverageReference = new AtomicReference<>(new MovingAverage(DIRECTORY_FILES_TRANSFER_DEFAULT_WINDOW_SIZE)); + transferredBytesPerSecMovingAverageReference = new AtomicReference<>( + new MovingAverage(DIRECTORY_FILES_TRANSFER_DEFAULT_WINDOW_SIZE) + ); } public DirectoryFileTransferTracker.Stats stats() { return new Stats( - transferredBytesStarted, - transferredBytesFailed, - transferredBytesSucceeded, - lastTransferTimestampMs, - transferredBytesMovingAverageReference.getAverage(), - lastSuccessfulTransferInBytes, - transferredBytesPerSecMovingAverageReference.getAverage() + transferredBytesStarted.get(), + transferredBytesFailed.get(), + transferredBytesSucceeded.get(), + lastTransferTimestampMs.get(), + totalTransferTimeInMs.get(), + transferredBytesMovingAverageReference.get().getAverage(), + lastSuccessfulTransferInBytes.get(), + transferredBytesPerSecMovingAverageReference.get().getAverage() ); } @@ -150,6 +173,7 @@ public static class Stats implements Writeable { public final long transferredBytesFailed; public final long transferredBytesSucceeded; public final long lastTransferTimestampMs; + public final long totalTransferTimeInMs; public final double transferredBytesMovingAverage; public final long lastSuccessfulTransferInBytes; public final double transferredBytesPerSecMovingAverage; @@ -159,6 +183,7 @@ public Stats( long transferredBytesFailed, long downloadBytesSucceeded, long lastTransferTimestampMs, + long totalTransferTimeInMs, double transferredBytesMovingAverage, long lastSuccessfulTransferInBytes, double 
transferredBytesPerSecMovingAverage @@ -167,6 +192,7 @@ public Stats( this.transferredBytesFailed = transferredBytesFailed; this.transferredBytesSucceeded = downloadBytesSucceeded; this.lastTransferTimestampMs = lastTransferTimestampMs; + this.totalTransferTimeInMs = totalTransferTimeInMs; this.transferredBytesMovingAverage = transferredBytesMovingAverage; this.lastSuccessfulTransferInBytes = lastSuccessfulTransferInBytes; this.transferredBytesPerSecMovingAverage = transferredBytesPerSecMovingAverage; @@ -177,6 +203,7 @@ public Stats(StreamInput in) throws IOException { this.transferredBytesFailed = in.readLong(); this.transferredBytesSucceeded = in.readLong(); this.lastTransferTimestampMs = in.readLong(); + this.totalTransferTimeInMs = in.readLong(); this.transferredBytesMovingAverage = in.readDouble(); this.lastSuccessfulTransferInBytes = in.readLong(); this.transferredBytesPerSecMovingAverage = in.readDouble(); @@ -188,6 +215,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(transferredBytesFailed); out.writeLong(transferredBytesSucceeded); out.writeLong(lastTransferTimestampMs); + out.writeLong(totalTransferTimeInMs); out.writeDouble(transferredBytesMovingAverage); out.writeLong(lastSuccessfulTransferInBytes); out.writeDouble(transferredBytesPerSecMovingAverage); @@ -203,6 +231,7 @@ public boolean equals(Object obj) { && transferredBytesFailed == stats.transferredBytesFailed && transferredBytesSucceeded == stats.transferredBytesSucceeded && lastTransferTimestampMs == stats.lastTransferTimestampMs + && totalTransferTimeInMs == stats.totalTransferTimeInMs && Double.compare(stats.transferredBytesMovingAverage, transferredBytesMovingAverage) == 0 && lastSuccessfulTransferInBytes == stats.lastSuccessfulTransferInBytes && Double.compare(stats.transferredBytesPerSecMovingAverage, transferredBytesPerSecMovingAverage) == 0; @@ -215,6 +244,7 @@ public int hashCode() { transferredBytesFailed, transferredBytesSucceeded, lastTransferTimestampMs, + totalTransferTimeInMs, transferredBytesMovingAverage, lastSuccessfulTransferInBytes, transferredBytesPerSecMovingAverage diff --git a/server/src/main/java/org/opensearch/index/store/Store.java b/server/src/main/java/org/opensearch/index/store/Store.java index 4f51994a6ac2f..b3ea2cdd02e21 100644 --- a/server/src/main/java/org/opensearch/index/store/Store.java +++ b/server/src/main/java/org/opensearch/index/store/Store.java @@ -950,14 +950,14 @@ public void copyFrom(Directory from, String src, String dest, IOContext context) long fileSize = from.fileLength(src); beforeDownload(fileSize); boolean success = false; + long startTime = System.currentTimeMillis(); try { - long startTime = System.currentTimeMillis(); super.copyFrom(from, src, dest, context); success = true; afterDownload(fileSize, startTime); } finally { if (!success) { - downloadFailed(fileSize); + downloadFailed(fileSize, startTime); } } } @@ -983,8 +983,8 @@ private void afterDownload(long fileSize, long startTimeInMs) { /** * Updates the amount of bytes failed in download */ - private void downloadFailed(long fileSize) { - directoryFileTransferTracker.addTransferredBytesFailed(fileSize); + private void downloadFailed(long fileSize, long startTimeInMs) { + directoryFileTransferTracker.addTransferredBytesFailed(fileSize, startTimeInMs); } } diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java index fbe70748adf2d..8a450b99904cf 
100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java @@ -460,6 +460,8 @@ public void testSerialization() throws IOException { assertEquals(remoteSegmentStats.getMaxRefreshTimeLag(), deserializedRemoteSegmentStats.getMaxRefreshTimeLag()); assertEquals(remoteSegmentStats.getMaxRefreshBytesLag(), deserializedRemoteSegmentStats.getMaxRefreshBytesLag()); assertEquals(remoteSegmentStats.getTotalRefreshBytesLag(), deserializedRemoteSegmentStats.getTotalRefreshBytesLag()); + assertEquals(remoteSegmentStats.getTotalUploadTime(), deserializedRemoteSegmentStats.getTotalUploadTime()); + assertEquals(remoteSegmentStats.getTotalDownloadTime(), deserializedRemoteSegmentStats.getTotalDownloadTime()); } } } @@ -793,6 +795,8 @@ private static NodeIndicesStats getNodeIndicesStats(boolean remoteStoreStats) { remoteSegmentStats.addTotalRefreshBytesLag(5L); remoteSegmentStats.addMaxRefreshBytesLag(2L); remoteSegmentStats.setMaxRefreshTimeLag(2L); + remoteSegmentStats.addTotalUploadTime(20L); + remoteSegmentStats.addTotalDownloadTime(20L); } return indicesStats; } diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsTestHelper.java b/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsTestHelper.java index 7430ccaed725b..e2a0209503976 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsTestHelper.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsTestHelper.java @@ -46,6 +46,7 @@ static RemoteSegmentTransferTracker.Stats createStatsForNewPrimary(ShardId shard 0, 0, 0, + 10, createZeroDirectoryFileTransferStats() ); } @@ -71,6 +72,7 @@ static RemoteSegmentTransferTracker.Stats createStatsForNewReplica(ShardId shard 0, 0, 0, + 0, createSampleDirectoryFileTransferStats() ); } @@ -96,16 +98,17 @@ static RemoteSegmentTransferTracker.Stats createStatsForRemoteStoreRestoredPrima 0, 0, 100, + 10, createSampleDirectoryFileTransferStats() ); } static DirectoryFileTransferTracker.Stats createSampleDirectoryFileTransferStats() { - return new DirectoryFileTransferTracker.Stats(10, 0, 10, 12345, 5, 5, 5); + return new DirectoryFileTransferTracker.Stats(10, 0, 10, 12345, 5, 5, 5, 10); } static DirectoryFileTransferTracker.Stats createZeroDirectoryFileTransferStats() { - return new DirectoryFileTransferTracker.Stats(0, 0, 0, 0, 0, 0, 0); + return new DirectoryFileTransferTracker.Stats(0, 0, 0, 0, 0, 0, 0, 0); } static ShardRouting createShardRouting(ShardId shardId, boolean isPrimary) { diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteSegmentTransferTrackerTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteSegmentTransferTrackerTests.java index 94934d5b4dca6..10fe3f95ab47c 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteSegmentTransferTrackerTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteSegmentTransferTrackerTests.java @@ -242,10 +242,10 @@ public void testAddDownloadBytesFailed() { pressureSettings.getUploadTimeMovingAverageWindowSize() ); long bytesToAdd = randomLongBetween(1000, 1000000); - pressureTracker.getDirectoryFileTransferTracker().addTransferredBytesFailed(bytesToAdd); + pressureTracker.getDirectoryFileTransferTracker().addTransferredBytesFailed(bytesToAdd, System.currentTimeMillis()); 
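// Editor's note (not part of the patch): addTransferredBytesFailed now takes the transfer
// start time so that failed transfers also contribute to totalTransferTimeInMs, clamped to
// a minimum of 1 ms exactly as on the success path in DirectoryFileTransferTracker.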
assertEquals(bytesToAdd, pressureTracker.getDirectoryFileTransferTracker().getTransferredBytesFailed()); long moreBytesToAdd = randomLongBetween(1000, 10000); - pressureTracker.getDirectoryFileTransferTracker().addTransferredBytesFailed(moreBytesToAdd); + pressureTracker.getDirectoryFileTransferTracker().addTransferredBytesFailed(moreBytesToAdd, System.currentTimeMillis()); assertEquals(bytesToAdd + moreBytesToAdd, pressureTracker.getDirectoryFileTransferTracker().getTransferredBytesFailed()); } @@ -473,18 +473,18 @@ public void testIsUploadTimeMsAverageReady() { long sum = 0; for (int i = 1; i < uploadTimeMovingAverageWindowSize; i++) { - pressureTracker.addUploadTimeMs(i); + pressureTracker.addTimeForCompletedUploadSync(i); sum += i; assertFalse(pressureTracker.isUploadTimeMsAverageReady()); assertEquals((double) sum / i, pressureTracker.getUploadTimeMsAverage(), 0.0d); } - pressureTracker.addUploadTimeMs(uploadTimeMovingAverageWindowSize); + pressureTracker.addTimeForCompletedUploadSync(uploadTimeMovingAverageWindowSize); sum += uploadTimeMovingAverageWindowSize; assertTrue(pressureTracker.isUploadTimeMsAverageReady()); assertEquals((double) sum / uploadTimeMovingAverageWindowSize, pressureTracker.getUploadTimeMsAverage(), 0.0d); - pressureTracker.addUploadTimeMs(100); + pressureTracker.addTimeForCompletedUploadSync(100); sum = sum + 100 - 1; assertEquals((double) sum / uploadTimeMovingAverageWindowSize, pressureTracker.getUploadTimeMsAverage(), 0.0d); } @@ -501,18 +501,18 @@ public void testIsDownloadBytesAverageReady() { long sum = 0; for (int i = 1; i < 20; i++) { - pressureTracker.getDirectoryFileTransferTracker().updateLastSuccessfulTransferSize(i); + pressureTracker.getDirectoryFileTransferTracker().updateSuccessfulTransferSize(i); sum += i; assertFalse(pressureTracker.getDirectoryFileTransferTracker().isTransferredBytesAverageReady()); assertEquals((double) sum / i, pressureTracker.getDirectoryFileTransferTracker().getTransferredBytesAverage(), 0.0d); } - pressureTracker.getDirectoryFileTransferTracker().updateLastSuccessfulTransferSize(20); + pressureTracker.getDirectoryFileTransferTracker().updateSuccessfulTransferSize(20); sum += 20; assertTrue(pressureTracker.getDirectoryFileTransferTracker().isTransferredBytesAverageReady()); assertEquals((double) sum / 20, pressureTracker.getDirectoryFileTransferTracker().getTransferredBytesAverage(), 0.0d); - pressureTracker.getDirectoryFileTransferTracker().updateLastSuccessfulTransferSize(100); + pressureTracker.getDirectoryFileTransferTracker().updateSuccessfulTransferSize(100); sum = sum + 100 - 1; assertEquals((double) sum / 20, pressureTracker.getDirectoryFileTransferTracker().getTransferredBytesAverage(), 0.0d); } @@ -545,6 +545,38 @@ public void testIsDownloadBytesPerSecAverageReady() { assertEquals((double) sum / 20, pressureTracker.getDirectoryFileTransferTracker().getTransferredBytesPerSecAverage(), 0.0d); } + public void testAddTotalUploadTimeInMs() { + pressureTracker = new RemoteSegmentTransferTracker( + shardId, + directoryFileTransferTracker, + pressureSettings.getUploadBytesMovingAverageWindowSize(), + pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), + pressureSettings.getUploadTimeMovingAverageWindowSize() + ); + long timeToAdd = randomLongBetween(100, 200); + pressureTracker.addTotalUploadTimeInMs(timeToAdd); + assertEquals(timeToAdd, pressureTracker.getTotalUploadTimeInMs()); + long moreTimeToAdd = randomLongBetween(100, 200); + pressureTracker.addTotalUploadTimeInMs(moreTimeToAdd); + 
assertEquals(timeToAdd + moreTimeToAdd, pressureTracker.getTotalUploadTimeInMs()); + } + + public void testAddTotalTransferTimeMs() { + pressureTracker = new RemoteSegmentTransferTracker( + shardId, + directoryFileTransferTracker, + pressureSettings.getUploadBytesMovingAverageWindowSize(), + pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), + pressureSettings.getUploadTimeMovingAverageWindowSize() + ); + long timeToAdd = randomLongBetween(100, 200); + pressureTracker.getDirectoryFileTransferTracker().addTotalTransferTimeInMs(timeToAdd); + assertEquals(timeToAdd, pressureTracker.getDirectoryFileTransferTracker().getTotalTransferTimeInMs()); + long moreTimeToAdd = randomLongBetween(100, 200); + pressureTracker.getDirectoryFileTransferTracker().addTotalTransferTimeInMs(moreTimeToAdd); + assertEquals(timeToAdd + moreTimeToAdd, pressureTracker.getDirectoryFileTransferTracker().getTotalTransferTimeInMs()); + } + /** * Tests whether RemoteSegmentTransferTracker.Stats object generated correctly from RemoteSegmentTransferTracker. * */ @@ -625,7 +657,7 @@ private RemoteSegmentTransferTracker constructTracker() { pressureSettings.getUploadTimeMovingAverageWindowSize() ); segmentPressureTracker.incrementTotalUploadsFailed(); - segmentPressureTracker.addUploadTimeMs(System.nanoTime() / 1_000_000L + randomIntBetween(10, 100)); + segmentPressureTracker.addTimeForCompletedUploadSync(System.nanoTime() / 1_000_000L + randomIntBetween(10, 100)); segmentPressureTracker.addUploadBytes(99); segmentPressureTracker.updateRemoteRefreshTimeMs(System.nanoTime() / 1_000_000L + randomIntBetween(10, 100)); segmentPressureTracker.incrementRejectionCount(); diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureServiceTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureServiceTests.java index d79e5ae99b696..e164269d96a3d 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureServiceTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureServiceTests.java @@ -108,7 +108,7 @@ public void testValidateSegmentUploadLag() { pressureTracker.updateRemoteRefreshSeqNo(3); AtomicLong sum = new AtomicLong(); IntStream.range(0, 20).forEach(i -> { - pressureTracker.addUploadTimeMs(i); + pressureTracker.addTimeForCompletedUploadSync(i); sum.addAndGet(i); }); double avg = (double) sum.get() / 20; From 72ef73ea836fe7454cbc984175cc49f901b46969 Mon Sep 17 00:00:00 2001 From: Shourya Dutta Biswas <114977491+shourya035@users.noreply.github.com> Date: Fri, 25 Aug 2023 17:33:39 +0530 Subject: [PATCH 23/30] [Remote Store] Removing version checks from RemoteSegmentStats (#9545) * Removing version checks Signed-off-by: Shourya Dutta Biswas <114977491+shourya035@users.noreply.github.com> --- .../index/remote/RemoteSegmentStats.java | 37 +++---------------- 1 file changed, 6 insertions(+), 31 deletions(-) diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteSegmentStats.java b/server/src/main/java/org/opensearch/index/remote/RemoteSegmentStats.java index ace026e28ab7c..a0f3357d3990a 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteSegmentStats.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteSegmentStats.java @@ -8,7 +8,6 @@ package org.opensearch.index.remote; -import org.opensearch.Version; import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStats; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; @@ 
-87,21 +86,9 @@ public RemoteSegmentStats(StreamInput in) throws IOException { downloadBytesSucceeded = in.readLong(); maxRefreshTimeLag = in.readLong(); maxRefreshBytesLag = in.readLong(); - /* TODO: - Adding version checks here since the base PR of adding remote store stats - in SegmentStats has already been merged and backported to 2.x branch. - - Since this is a new field that is being added, we need to have this check in place - to ensure BWCs don't break. - - This would have to be removed after the new field addition PRs are also backported to 2.x. - If possible we would need to ensure that all field addition PRs are backported at once - */ - if (in.getVersion().onOrAfter(Version.CURRENT)) { - totalRefreshBytesLag = in.readLong(); - totalUploadTime = in.readLong(); - totalDownloadTime = in.readLong(); - } + totalRefreshBytesLag = in.readLong(); + totalUploadTime = in.readLong(); + totalDownloadTime = in.readLong(); } /** @@ -250,21 +237,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(downloadBytesSucceeded); out.writeLong(maxRefreshTimeLag); out.writeLong(maxRefreshBytesLag); - /* TODO: - Adding version checks here since the base PR of adding remote store stats - in SegmentStats has already been merged and backported to 2.x branch. - - Since this is a new field that is being added, we need to have this check in place - to ensure BWCs don't break. - - This would have to be removed after the new field addition PRs are also backported to 2.x. - If possible we would need to ensure that all field addition PRs are backported at once - */ - if (out.getVersion().onOrAfter(Version.CURRENT)) { - out.writeLong(totalRefreshBytesLag); - out.writeLong(totalUploadTime); - out.writeLong(totalDownloadTime); - } + out.writeLong(totalRefreshBytesLag); + out.writeLong(totalUploadTime); + out.writeLong(totalDownloadTime); } @Override From ae0c9bd74d2760e43e52234212f03e9146061522 Mon Sep 17 00:00:00 2001 From: Jay Deng Date: Fri, 25 Aug 2023 05:53:45 -0700 Subject: [PATCH 24/30] Update AggregatorFactory to provide a method to indicate if it supports concurrent search (#9469) * Parameterize parent-join search tests and add coverage for sparse slice case Signed-off-by: Jay Deng * Separate request-based and settings-based concurrent segment search controls and introduce AggregatorFactory method to determine concurrent search support Signed-off-by: Jay Deng * Add supportsConcurrentSegmentSearch override for all AggregatorFactory concrete classes Signed-off-by: Jay Deng * Addressing feedback Signed-off-by: Jay Deng --------- Signed-off-by: Jay Deng --- CHANGELOG.md | 1 + .../stats/MatrixStatsAggregatorFactory.java | 5 +++ .../geogrid/GeoHashGridAggregatorFactory.java | 5 +++ .../geogrid/GeoTileGridAggregatorFactory.java | 5 +++ .../metrics/GeoBoundsAggregatorFactory.java | 5 +++ .../AbstractParentChildTestCase.java | 44 +++++++++++++++++++ .../join/aggregations/ChildrenIT.java | 39 ++++++++++++++++ .../join/aggregations/ParentIT.java | 39 ++++++++++++++++ .../join/query/ChildQuerySearchIT.java | 23 ++++++++++ .../opensearch/join/query/InnerHitsIT.java | 23 ++++++++++ .../join/query/ParentChildTestCase.java | 7 ++- .../ChildrenAggregatorFactory.java | 5 +++ .../aggregations/ParentAggregatorFactory.java | 6 +++ .../action/search/TransportSearchIT.java | 5 +++ .../search/DefaultSearchContext.java | 32 ++++++++++---- .../org/opensearch/search/SearchService.java | 8 ++-- .../aggregations/AggregatorFactories.java | 9 ++++ .../aggregations/AggregatorFactory.java | 11 +++++ 
.../AdjacencyMatrixAggregatorFactory.java | 4 ++ .../CompositeAggregationFactory.java | 5 +++ .../filter/FilterAggregatorFactory.java | 4 ++ .../filter/FiltersAggregatorFactory.java | 4 ++ .../global/GlobalAggregatorFactory.java | 5 +++ .../AutoDateHistogramAggregatorFactory.java | 5 +++ .../DateHistogramAggregatorFactory.java | 5 +++ .../histogram/HistogramAggregatorFactory.java | 5 +++ ...riableWidthHistogramAggregatorFactory.java | 5 +++ .../missing/MissingAggregatorFactory.java | 5 +++ .../nested/NestedAggregatorFactory.java | 4 ++ .../ReverseNestedAggregatorFactory.java | 5 +++ .../range/AbstractRangeAggregatorFactory.java | 5 +++ .../range/BinaryRangeAggregatorFactory.java | 4 ++ .../range/DateRangeAggregatorFactory.java | 4 ++ .../GeoDistanceRangeAggregatorFactory.java | 5 +++ .../bucket/range/RangeAggregatorFactory.java | 5 +++ .../sampler/DiversifiedAggregatorFactory.java | 5 +++ .../sampler/SamplerAggregatorFactory.java | 4 ++ .../terms/MultiTermsAggregationFactory.java | 5 +++ .../terms/RareTermsAggregatorFactory.java | 5 +++ .../SignificantTermsAggregatorFactory.java | 5 +++ .../SignificantTextAggregatorFactory.java | 5 +++ .../bucket/terms/TermsAggregatorFactory.java | 4 ++ .../metrics/AvgAggregatorFactory.java | 5 +++ .../metrics/CardinalityAggregatorFactory.java | 5 +++ .../ExtendedStatsAggregatorFactory.java | 5 +++ .../metrics/GeoCentroidAggregatorFactory.java | 5 +++ .../metrics/MaxAggregatorFactory.java | 5 +++ ...ianAbsoluteDeviationAggregatorFactory.java | 5 +++ .../metrics/MinAggregatorFactory.java | 5 +++ .../PercentileRanksAggregatorFactory.java | 5 +++ .../metrics/PercentilesAggregatorFactory.java | 5 +++ .../ScriptedMetricAggregatorFactory.java | 5 +++ .../metrics/StatsAggregatorFactory.java | 5 +++ .../metrics/SumAggregatorFactory.java | 5 +++ .../metrics/TopHitsAggregatorFactory.java | 4 ++ .../metrics/ValueCountAggregatorFactory.java | 5 +++ .../metrics/WeightedAvgAggregatorFactory.java | 5 +++ .../internal/FilteredSearchContext.java | 4 +- .../search/internal/SearchContext.java | 4 +- .../query/QueryPhaseSearcherWrapper.java | 4 +- .../opensearch/search/SearchServiceTests.java | 8 ++-- .../internal/ContextIndexSearcherTests.java | 4 +- .../search/query/QueryPhaseTests.java | 4 +- .../DelayedShardAggregationBuilder.java | 5 +++ .../aggregations/AggregatorTestCase.java | 5 +++ .../opensearch/test/TestSearchContext.java | 2 +- 66 files changed, 471 insertions(+), 27 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 99b8120ee93df..350724bb21862 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -155,6 +155,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Improve performance of encoding composite keys in multi-term aggregations ([#9412](https://github.com/opensearch-project/OpenSearch/pull/9412)) - Fix sort related ITs for concurrent search ([#9177](https://github.com/opensearch-project/OpenSearch/pull/9466) - Removing the vec file extension from INDEX_STORE_HYBRID_NIO_EXTENSIONS, to ensure the no performance degradation for vector search via Lucene Engine.([#9528](https://github.com/opensearch-project/OpenSearch/pull/9528))) +- Separate request-based and settings-based concurrent segment search controls and introduce AggregatorFactory method to determine concurrent search support ([#9469](https://github.com/opensearch-project/OpenSearch/pull/9469)) ### Deprecated diff --git a/modules/aggs-matrix-stats/src/main/java/org/opensearch/search/aggregations/matrix/stats/MatrixStatsAggregatorFactory.java 
b/modules/aggs-matrix-stats/src/main/java/org/opensearch/search/aggregations/matrix/stats/MatrixStatsAggregatorFactory.java index f7ab0db3c9607..24f74f3859157 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/opensearch/search/aggregations/matrix/stats/MatrixStatsAggregatorFactory.java +++ b/modules/aggs-matrix-stats/src/main/java/org/opensearch/search/aggregations/matrix/stats/MatrixStatsAggregatorFactory.java @@ -89,4 +89,9 @@ protected Aggregator doCreateInternal( } return new MatrixStatsAggregator(name, typedValuesSources, searchContext, parent, multiValueMode, metadata); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java index 197ab2d99f114..60ee1973c1080 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java @@ -196,4 +196,9 @@ static void registerAggregators(ValuesSourceRegistry.Builder builder) { true ); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java index d5a3919684345..54b82f9770b63 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java @@ -194,4 +194,9 @@ static void registerAggregators(ValuesSourceRegistry.Builder builder) { true ); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsAggregatorFactory.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsAggregatorFactory.java index 780f25ba3d7fb..fc9cce3cf98c1 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsAggregatorFactory.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsAggregatorFactory.java @@ -89,4 +89,9 @@ static void registerAggregators(ValuesSourceRegistry.Builder builder) { builder.register(GeoBoundsAggregationBuilder.REGISTRY_KEY, CoreValuesSourceType.GEOPOINT, GeoBoundsAggregator::new, true); builder.register(GeoBoundsAggregationBuilder.REGISTRY_KEY, CoreValuesSourceType.GEO_SHAPE, GeoBoundsGeoShapeAggregator::new, true); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/AbstractParentChildTestCase.java b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/AbstractParentChildTestCase.java index dac1b313777a6..e049edf843069 100644 --- a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/AbstractParentChildTestCase.java +++ 
b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/AbstractParentChildTestCase.java @@ -33,6 +33,11 @@ package org.opensearch.join.aggregations; import org.opensearch.action.index.IndexRequestBuilder; +import org.opensearch.action.search.SearchRequestBuilder; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.client.Requests; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.settings.Settings; import org.opensearch.join.query.ParentChildTestCase; import org.junit.Before; @@ -44,6 +49,7 @@ import java.util.Set; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; /** * Small base test-class which combines stuff used for Children and Parent aggregation tests @@ -52,6 +58,10 @@ public abstract class AbstractParentChildTestCase extends ParentChildTestCase { protected final Map categoryToControl = new HashMap<>(); protected final Map articleToControl = new HashMap<>(); + public AbstractParentChildTestCase(Settings dynamicSettings) { + super(dynamicSettings); + } + @Before public void setupCluster() throws Exception { assertAcked( @@ -154,4 +164,38 @@ private ParentControl(String category) { this.category = category; } } + + // Test when there is 1 child document and 1 parent document per segment. + public void testSparseSegments() throws InterruptedException { + assertAcked( + prepareCreate("sparse").setMapping( + addFieldMappings( + buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "article", "comment"), + "commenter", + "keyword", + "category", + "keyword" + ) + ) + .setSettings( + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + ) + ); + + List requests = new ArrayList<>(); + requests.add(createIndexRequest("sparse", "article", "article-0", null, "category", List.of("0"))); + indexRandom(true, false, requests); + client().admin().indices().refresh(Requests.refreshRequest("sparse")).actionGet(); + requests = new ArrayList<>(); + requests.add(createIndexRequest("sparse", "comment", "comment-0", "article-0", "commenter", "0")); + indexRandom(true, false, requests); + + SearchResponse searchResponse = getSearchRequest().get(); + assertSearchResponse(searchResponse); + validateSpareSegmentsSearchResponse(searchResponse); + } + + abstract SearchRequestBuilder getSearchRequest(); + + abstract void validateSpareSegmentsSearchResponse(SearchResponse searchResponse); } diff --git a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/ChildrenIT.java b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/ChildrenIT.java index 72c502c616ff8..5fc0a202ae45e 100644 --- a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/ChildrenIT.java +++ b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/ChildrenIT.java @@ -31,13 +31,17 @@ package org.opensearch.join.aggregations; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.search.join.ScoreMode; import org.opensearch.action.index.IndexRequestBuilder; +import org.opensearch.action.search.SearchRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.update.UpdateResponse; import org.opensearch.client.Requests; import org.opensearch.cluster.metadata.IndexMetadata; import 
org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.SearchHit; import org.opensearch.search.aggregations.AggregationBuilders; import org.opensearch.search.aggregations.InternalAggregation; @@ -47,14 +51,18 @@ import org.opensearch.search.sort.SortOrder; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.List; import java.util.Map; import java.util.Set; +import static org.opensearch.index.query.QueryBuilders.matchAllQuery; import static org.opensearch.index.query.QueryBuilders.matchQuery; import static org.opensearch.index.query.QueryBuilders.termQuery; import static org.opensearch.join.aggregations.JoinAggregationBuilders.children; import static org.opensearch.join.query.JoinQueryBuilders.hasChildQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.sum; import static org.opensearch.search.aggregations.AggregationBuilders.terms; import static org.opensearch.search.aggregations.AggregationBuilders.topHits; @@ -69,6 +77,23 @@ public class ChildrenIT extends AbstractParentChildTestCase { + public ChildrenIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + public void testChildrenAggs() throws Exception { SearchResponse searchResponse = client().prepareSearch("test") .setQuery(matchQuery("randomized", true)) @@ -407,4 +432,18 @@ public void testPostCollectAllLeafReaders() throws Exception { children = parents.getBuckets().get(0).getAggregations().get("child_docs"); assertThat(children.getDocCount(), equalTo(2L)); } + + @Override + SearchRequestBuilder getSearchRequest() { + return client().prepareSearch("sparse") + .setSize(10000) + .setQuery(matchAllQuery()) + .addAggregation(children("to_comment", "comment").subAggregation(terms("commenters").field("commenter").size(10000))); + } + + @Override + void validateSpareSegmentsSearchResponse(SearchResponse searchResponse) { + Children children = searchResponse.getAggregations().get("to_comment"); + assertEquals(children.getDocCount(), 1); + } } diff --git a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/ParentIT.java b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/ParentIT.java index 351b0beec481b..04703a65aa19d 100644 --- a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/ParentIT.java +++ b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/ParentIT.java @@ -32,12 +32,18 @@ package org.opensearch.join.aggregations; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.search.SearchRequestBuilder; import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.aggregations.Aggregation; import 
org.opensearch.search.aggregations.bucket.MultiBucketsAggregation; import org.opensearch.search.aggregations.bucket.terms.Terms; +import java.util.Arrays; +import java.util.Collection; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -47,8 +53,10 @@ import java.util.stream.Collectors; import java.util.stream.Stream; +import static org.opensearch.index.query.QueryBuilders.matchAllQuery; import static org.opensearch.index.query.QueryBuilders.matchQuery; import static org.opensearch.join.aggregations.JoinAggregationBuilders.parent; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.terms; import static org.opensearch.search.aggregations.AggregationBuilders.topHits; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; @@ -56,6 +64,23 @@ public class ParentIT extends AbstractParentChildTestCase { + public ParentIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + public void testSimpleParentAgg() throws Exception { final SearchRequestBuilder searchRequest = client().prepareSearch("test") .setSize(10000) @@ -264,4 +289,18 @@ public void testTermsParentAggTerms() throws Exception { } } } + + @Override + SearchRequestBuilder getSearchRequest() { + return client().prepareSearch("sparse") + .setSize(10000) + .setQuery(matchAllQuery()) + .addAggregation(parent("to_article", "comment").subAggregation(terms("category").field("category").size(10000))); + } + + @Override + void validateSpareSegmentsSearchResponse(SearchResponse searchResponse) { + Parent parentAgg = searchResponse.getAggregations().get("to_article"); + assertEquals(parentAgg.getDocCount(), 1); + } } diff --git a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ChildQuerySearchIT.java b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ChildQuerySearchIT.java index 037af62427f14..c43d6352b26f8 100644 --- a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ChildQuerySearchIT.java +++ b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ChildQuerySearchIT.java @@ -31,6 +31,8 @@ package org.opensearch.join.query; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.search.join.ScoreMode; import org.opensearch.action.explain.ExplainResponse; import org.opensearch.action.index.IndexRequestBuilder; @@ -42,6 +44,7 @@ import org.opensearch.common.lucene.search.function.FunctionScoreQuery; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.rest.RestStatus; import org.opensearch.index.query.BoolQueryBuilder; import org.opensearch.index.query.IdsQueryBuilder; @@ -65,6 +68,8 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.HashMap; import 
java.util.HashSet; import java.util.List; @@ -87,6 +92,7 @@ import static org.opensearch.join.query.JoinQueryBuilders.hasChildQuery; import static org.opensearch.join.query.JoinQueryBuilders.hasParentQuery; import static org.opensearch.join.query.JoinQueryBuilders.parentId; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; @@ -100,6 +106,23 @@ public class ChildQuerySearchIT extends ParentChildTestCase { + public ChildQuerySearchIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + public void testMultiLevelChild() throws Exception { assertAcked( prepareCreate("test").setMapping( diff --git a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/InnerHitsIT.java b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/InnerHitsIT.java index ffcc9cf38545f..39da86c7fd726 100644 --- a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/InnerHitsIT.java +++ b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/InnerHitsIT.java @@ -32,11 +32,15 @@ package org.opensearch.join.query; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.search.join.ScoreMode; import org.apache.lucene.util.ArrayUtil; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.IndexSettings; import org.opensearch.index.query.BoolQueryBuilder; import org.opensearch.index.query.InnerHitBuilder; @@ -54,6 +58,7 @@ import org.opensearch.search.sort.SortOrder; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.List; @@ -73,6 +78,7 @@ import static org.opensearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; import static org.opensearch.join.query.JoinQueryBuilders.hasChildQuery; import static org.opensearch.join.query.JoinQueryBuilders.hasParentQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; @@ -87,6 +93,23 @@ public class InnerHitsIT extends ParentChildTestCase { + public InnerHitsIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + 
new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + @Override protected Collection> nodePlugins() { ArrayList> plugins = new ArrayList<>(super.nodePlugins()); diff --git a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ParentChildTestCase.java b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ParentChildTestCase.java index f10b0b26a8669..8c19c0aafe763 100644 --- a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ParentChildTestCase.java +++ b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ParentChildTestCase.java @@ -41,6 +41,7 @@ import org.opensearch.plugins.Plugin; import org.opensearch.test.InternalSettingsPlugin; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.io.IOException; import java.util.Arrays; @@ -50,7 +51,11 @@ import java.util.Map; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE) -public abstract class ParentChildTestCase extends OpenSearchIntegTestCase { +public abstract class ParentChildTestCase extends ParameterizedOpenSearchIntegTestCase { + + public ParentChildTestCase(Settings dynamicSettings) { + super(dynamicSettings); + } @Override protected boolean ignoreExternalCluster() { diff --git a/modules/parent-join/src/main/java/org/opensearch/join/aggregations/ChildrenAggregatorFactory.java b/modules/parent-join/src/main/java/org/opensearch/join/aggregations/ChildrenAggregatorFactory.java index 793b35111cfe2..bbca89fc56820 100644 --- a/modules/parent-join/src/main/java/org/opensearch/join/aggregations/ChildrenAggregatorFactory.java +++ b/modules/parent-join/src/main/java/org/opensearch/join/aggregations/ChildrenAggregatorFactory.java @@ -118,4 +118,9 @@ public String getStatsSubtype() { // Child Aggregation is registered in non-standard way, so it might return child's values type return OTHER_SUBTYPE; } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/modules/parent-join/src/main/java/org/opensearch/join/aggregations/ParentAggregatorFactory.java b/modules/parent-join/src/main/java/org/opensearch/join/aggregations/ParentAggregatorFactory.java index 40c07c8f53e20..9a21cd1db3200 100644 --- a/modules/parent-join/src/main/java/org/opensearch/join/aggregations/ParentAggregatorFactory.java +++ b/modules/parent-join/src/main/java/org/opensearch/join/aggregations/ParentAggregatorFactory.java @@ -118,4 +118,10 @@ public String getStatsSubtype() { // Parent Aggregation is registered in non-standard way return OTHER_SUBTYPE; } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + // See https://github.com/opensearch-project/OpenSearch/issues/9316 + return false; + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/action/search/TransportSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/action/search/TransportSearchIT.java index 2dffc393ef749..f0a3b5a5901ce 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/search/TransportSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/search/TransportSearchIT.java @@ -594,6 +594,11 @@ protected Aggregator createInternal( ) throws 
IOException { return new TestAggregator(name, parent, searchContext); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } }; } diff --git a/server/src/main/java/org/opensearch/search/DefaultSearchContext.java b/server/src/main/java/org/opensearch/search/DefaultSearchContext.java index ef467d494f694..ef8a6c9f36b0c 100644 --- a/server/src/main/java/org/opensearch/search/DefaultSearchContext.java +++ b/server/src/main/java/org/opensearch/search/DefaultSearchContext.java @@ -45,6 +45,7 @@ import org.opensearch.action.search.SearchType; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Nullable; +import org.opensearch.common.SetOnce; import org.opensearch.common.lease.Releasables; import org.opensearch.common.lucene.search.Queries; import org.opensearch.common.unit.TimeValue; @@ -183,7 +184,8 @@ final class DefaultSearchContext extends SearchContext { private final QueryShardContext queryShardContext; private final FetchPhase fetchPhase; private final Function<SearchSourceBuilder, InternalAggregation.ReduceContextBuilder> requestToAggReduceContextBuilder; - private final boolean useConcurrentSearch; + private final boolean concurrentSearchSettingsEnabled; + private final SetOnce<Boolean> requestShouldUseConcurrentSearch = new SetOnce<>(); DefaultSearchContext( ReaderContext readerContext, @@ -214,14 +216,14 @@ final class DefaultSearchContext extends SearchContext { this.indexShard = readerContext.indexShard(); this.clusterService = clusterService; this.engineSearcher = readerContext.acquireSearcher("search"); - this.useConcurrentSearch = useConcurrentSearch(executor); + this.concurrentSearchSettingsEnabled = evaluateConcurrentSegmentSearchSettings(executor); this.searcher = new ContextIndexSearcher( engineSearcher.getIndexReader(), engineSearcher.getSimilarity(), engineSearcher.getQueryCache(), engineSearcher.getQueryCachingPolicy(), lowLevelCancellation, - useConcurrentSearch ? executor : null, + concurrentSearchSettingsEnabled ? executor : null, this ); this.relativeTimeSupplier = relativeTimeSupplier; @@ -876,11 +878,23 @@ public Profilers getProfilers() { } /** - * Returns concurrent segment search status for the search context + * Returns concurrent segment search status for the search context. This should only be used after request parsing, during which requestShouldUseConcurrentSearch will be set.
*/ @Override - public boolean isConcurrentSegmentSearchEnabled() { - return useConcurrentSearch; + public boolean shouldUseConcurrentSearch() { + assert requestShouldUseConcurrentSearch.get() != null : "requestShouldUseConcurrentSearch must be set"; + return concurrentSearchSettingsEnabled && Boolean.TRUE.equals(requestShouldUseConcurrentSearch.get()); + } + + /** + * Evaluate if parsed request supports concurrent segment search + */ + public void evaluateRequestShouldUseConcurrentSearch() { + if (aggregations() != null && aggregations().factories() != null) { + requestShouldUseConcurrentSearch.set(aggregations().factories().allFactoriesSupportConcurrentSearch()); + } else { + requestShouldUseConcurrentSearch.set(true); + } } public void setProfilers(Profilers profilers) { @@ -910,7 +924,7 @@ public ReaderContext readerContext() { @Override public InternalAggregation.ReduceContext partialOnShard() { InternalAggregation.ReduceContext rc = requestToAggReduceContextBuilder.apply(request.source()).forPartialReduction(); - rc.setSliceLevel(isConcurrentSegmentSearchEnabled()); + rc.setSliceLevel(shouldUseConcurrentSearch()); return rc; } @@ -929,7 +943,7 @@ public BucketCollectorProcessor bucketCollectorProcessor() { * @return true: use concurrent search * false: otherwise */ - private boolean useConcurrentSearch(Executor concurrentSearchExecutor) { + private boolean evaluateConcurrentSegmentSearchSettings(Executor concurrentSearchExecutor) { if (FeatureFlags.isEnabled(FeatureFlags.CONCURRENT_SEGMENT_SEARCH) && (clusterService != null) && (concurrentSearchExecutor != null)) { @@ -946,7 +960,7 @@ private boolean useConcurrentSearch(Executor concurrentSearchExecutor) { @Override public int getTargetMaxSliceCount() { - if (isConcurrentSegmentSearchEnabled() == false) { + if (shouldUseConcurrentSearch() == false) { throw new IllegalStateException("Target slice count should not be used when concurrent search is disabled"); } return clusterService.getClusterSettings().get(SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING); diff --git a/server/src/main/java/org/opensearch/search/SearchService.java b/server/src/main/java/org/opensearch/search/SearchService.java index fac461755acff..a02f9601eb093 100644 --- a/server/src/main/java/org/opensearch/search/SearchService.java +++ b/server/src/main/java/org/opensearch/search/SearchService.java @@ -1237,6 +1237,7 @@ private void processFailure(ReaderContext context, Exception exc) { private void parseSource(DefaultSearchContext context, SearchSourceBuilder source, boolean includeAggregations) { // nothing to parse... 
if (source == null) { + context.evaluateRequestShouldUseConcurrentSearch(); return; } SearchShardTarget shardTarget = context.shardTarget(); @@ -1283,9 +1284,6 @@ private void parseSource(DefaultSearchContext context, SearchSourceBuilder sourc if (source.minScore() != null) { context.minimumScore(source.minScore()); } - if (source.profile()) { - context.setProfilers(new Profilers(context.searcher(), context.isConcurrentSegmentSearchEnabled())); - } if (source.timeout() != null) { context.timeout(source.timeout()); } @@ -1419,6 +1417,10 @@ private void parseSource(DefaultSearchContext context, SearchSourceBuilder sourc final CollapseContext collapseContext = source.collapse().build(queryShardContext); context.collapse(collapseContext); } + context.evaluateRequestShouldUseConcurrentSearch(); + if (source.profile()) { + context.setProfilers(new Profilers(context.searcher(), context.shouldUseConcurrentSearch())); + } } /** diff --git a/server/src/main/java/org/opensearch/search/aggregations/AggregatorFactories.java b/server/src/main/java/org/opensearch/search/aggregations/AggregatorFactories.java index 81fd741e9139c..9b8ebe0b4e5e4 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/AggregatorFactories.java +++ b/server/src/main/java/org/opensearch/search/aggregations/AggregatorFactories.java @@ -257,6 +257,15 @@ private AggregatorFactories(AggregatorFactory[] factories) { this.factories = factories; } + public boolean allFactoriesSupportConcurrentSearch() { + for (AggregatorFactory factory : factories) { + if (factory.supportsConcurrentSegmentSearch() == false || factory.evaluateChildFactories() == false) { + return false; + } + } + return true; + } + /** * Create all aggregators so that they can be consumed with multiple * buckets. diff --git a/server/src/main/java/org/opensearch/search/aggregations/AggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/AggregatorFactory.java index 05686f35c2166..759d043743978 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/AggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/AggregatorFactory.java @@ -114,4 +114,15 @@ public AggregatorFactory getParent() { public String getStatsSubtype() { return OTHER_SUBTYPE; } + + /** + * Implementation should override this method and return true if the Aggregator created by the factory works with concurrent segment search execution model + */ + protected boolean supportsConcurrentSegmentSearch() { + return false; + } + + public boolean evaluateChildFactories() { + return factories.allFactoriesSupportConcurrentSearch(); + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregatorFactory.java index fe1270e10c80e..99ffb563ba2a8 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregatorFactory.java @@ -91,4 +91,8 @@ public Aggregator createInternal( return new AdjacencyMatrixAggregator(name, factories, separator, keys, weights, searchContext, parent, metadata); } + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationFactory.java 
b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationFactory.java index 09691a69c75f4..2ff79fb623def 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationFactory.java @@ -77,4 +77,9 @@ protected Aggregator createInternal( ) throws IOException { return new CompositeAggregator(name, factories, searchContext, parent, metadata, size, sources, afterKey); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/FilterAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/FilterAggregatorFactory.java index 4ab573cf0a6b6..55c841f5b9c04 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/FilterAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/FilterAggregatorFactory.java @@ -101,4 +101,8 @@ public Aggregator createInternal( return new FilterAggregator(name, () -> this.getWeight(), factories, searchContext, parent, cardinality, metadata); } + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/FiltersAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/FiltersAggregatorFactory.java index 795f81a08d8d5..35d968b789a21 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/FiltersAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/FiltersAggregatorFactory.java @@ -146,4 +146,8 @@ public Aggregator createInternal( ); } + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/global/GlobalAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/global/GlobalAggregatorFactory.java index 419ae9f16d9e6..47de1fcda29c9 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/global/GlobalAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/global/GlobalAggregatorFactory.java @@ -82,4 +82,9 @@ public Aggregator createInternal( } return new GlobalAggregator(name, factories, searchContext, metadata); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorFactory.java index 7434ef84ee92f..059b88c9475ed 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorFactory.java @@ -124,4 +124,9 @@ protected Aggregator createUnmapped(SearchContext searchContext, Aggregator pare metadata ); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java 
b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java index dd74d83c665de..807ec1ab4e4b7 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java @@ -148,4 +148,9 @@ protected Aggregator createUnmapped(SearchContext searchContext, Aggregator pare metadata ); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/HistogramAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/HistogramAggregatorFactory.java index 321c16cdba970..7506dcde23641 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/HistogramAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/HistogramAggregatorFactory.java @@ -149,4 +149,9 @@ protected Aggregator createUnmapped(SearchContext searchContext, Aggregator pare metadata ); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregatorFactory.java index d9d9a74eb958f..b846bf72ef4c5 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregatorFactory.java @@ -116,4 +116,9 @@ protected Aggregator createUnmapped(SearchContext searchContext, Aggregator pare metadata ); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/missing/MissingAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/missing/MissingAggregatorFactory.java index cfa2bd3f7097c..3032d695a3ee2 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/missing/MissingAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/missing/MissingAggregatorFactory.java @@ -85,4 +85,9 @@ protected MissingAggregator doCreateInternal( .getAggregator(MissingAggregationBuilder.REGISTRY_KEY, config) .build(name, factories, config, searchContext, parent, cardinality, metadata); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/nested/NestedAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/nested/NestedAggregatorFactory.java index ca1018795b518..a43d41882e475 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/nested/NestedAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/nested/NestedAggregatorFactory.java @@ -100,4 +100,8 @@ public InternalAggregation buildEmptyAggregation() { } } + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/nested/ReverseNestedAggregatorFactory.java 
b/server/src/main/java/org/opensearch/search/aggregations/bucket/nested/ReverseNestedAggregatorFactory.java index 27cd8a2688836..816f05052b6a2 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/nested/ReverseNestedAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/nested/ReverseNestedAggregatorFactory.java @@ -83,6 +83,11 @@ public Aggregator createInternal( } } + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } + /** * Unmapped class for reverse nested agg * diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/range/AbstractRangeAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/range/AbstractRangeAggregatorFactory.java index bfd7845e7e16f..41f2768eb7544 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/range/AbstractRangeAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/range/AbstractRangeAggregatorFactory.java @@ -122,4 +122,9 @@ protected Aggregator doCreateInternal( metadata ); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/range/BinaryRangeAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/range/BinaryRangeAggregatorFactory.java index 0ee440ecc8487..fc4b4273df703 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/range/BinaryRangeAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/range/BinaryRangeAggregatorFactory.java @@ -103,4 +103,8 @@ protected Aggregator doCreateInternal( .build(name, factories, config.getValuesSource(), config.format(), ranges, keyed, searchContext, parent, cardinality, metadata); } + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/range/DateRangeAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/range/DateRangeAggregatorFactory.java index d243a89c632d7..dcf6b84164991 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/range/DateRangeAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/range/DateRangeAggregatorFactory.java @@ -72,4 +72,8 @@ public DateRangeAggregatorFactory( ); } + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/range/GeoDistanceRangeAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/range/GeoDistanceRangeAggregatorFactory.java index 3208d35c6a407..728f43094cf7e 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/range/GeoDistanceRangeAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/range/GeoDistanceRangeAggregatorFactory.java @@ -172,6 +172,11 @@ protected Aggregator doCreateInternal( ); } + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } + /** * The source location for the distance calculation * diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/range/RangeAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/range/RangeAggregatorFactory.java index 803bceaf57fb5..c58b2e881803c 
100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/range/RangeAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/range/RangeAggregatorFactory.java @@ -73,4 +73,9 @@ public RangeAggregatorFactory( metadata ); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/DiversifiedAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/DiversifiedAggregatorFactory.java index 41ef823a375c0..5f81c76b69385 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/DiversifiedAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/DiversifiedAggregatorFactory.java @@ -159,4 +159,9 @@ public InternalAggregation buildEmptyAggregation() { } }; } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/SamplerAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/SamplerAggregatorFactory.java index fa98c799352a6..d3db8a66ee21f 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/SamplerAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/SamplerAggregatorFactory.java @@ -73,4 +73,8 @@ public Aggregator createInternal( return new SamplerAggregator(name, shardSize, factories, searchContext, parent, metadata); } + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregationFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregationFactory.java index aa6da630aa9f3..7134999e4aa85 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregationFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregationFactory.java @@ -157,6 +157,11 @@ protected Aggregator createInternal( ); } + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } + /** * Supplier for internal values source * diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/RareTermsAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/RareTermsAggregatorFactory.java index 93b8eca370d46..b5f3abe89ac59 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/RareTermsAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/RareTermsAggregatorFactory.java @@ -237,6 +237,11 @@ protected Aggregator doCreateInternal( ); } + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } + /** * Execution mode for rare terms agg * diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantTermsAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantTermsAggregatorFactory.java index 54fb746b97ebb..f6802a58dfed2 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantTermsAggregatorFactory.java +++ 
b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantTermsAggregatorFactory.java @@ -306,6 +306,11 @@ protected Aggregator doCreateInternal( ); } + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } + /** * The execution mode for the significant terms agg * diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantTextAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantTextAggregatorFactory.java index 8acc69083dea4..81366c212c86c 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantTextAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantTextAggregatorFactory.java @@ -312,4 +312,9 @@ public void close() { Releasables.close(dupSequenceSpotters); } } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java index 62844b4499dba..a4d73bfd3e634 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java @@ -558,4 +558,8 @@ public String toString() { } } + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/AvgAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/AvgAggregatorFactory.java index 75419b7c64b12..0a09fae1eaebe 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/AvgAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/AvgAggregatorFactory.java @@ -90,4 +90,9 @@ protected Aggregator doCreateInternal( .getAggregator(AvgAggregationBuilder.REGISTRY_KEY, config) .build(name, config, searchContext, parent, metadata); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregatorFactory.java index 47084436d3d4f..980667b45324e 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregatorFactory.java @@ -89,6 +89,11 @@ protected Aggregator doCreateInternal( .build(name, config, precision(), searchContext, parent, metadata); } + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } + private int precision() { return precisionThreshold == null ? 
HyperLogLogPlusPlus.DEFAULT_PRECISION diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/ExtendedStatsAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/ExtendedStatsAggregatorFactory.java index 20203b22b2459..99b3d09517a1f 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/ExtendedStatsAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/ExtendedStatsAggregatorFactory.java @@ -94,4 +94,9 @@ protected Aggregator doCreateInternal( .getAggregator(ExtendedStatsAggregationBuilder.REGISTRY_KEY, config) .build(name, config, searchContext, parent, sigma, metadata); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/GeoCentroidAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/GeoCentroidAggregatorFactory.java index 1d450eeae98d8..a3fc91c6b62fb 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/GeoCentroidAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/GeoCentroidAggregatorFactory.java @@ -81,6 +81,11 @@ protected Aggregator doCreateInternal( .build(name, config, searchContext, parent, metadata); } + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } + static void registerAggregators(ValuesSourceRegistry.Builder builder) { builder.register(GeoCentroidAggregationBuilder.REGISTRY_KEY, CoreValuesSourceType.GEOPOINT, GeoCentroidAggregator::new, true); } diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/MaxAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/MaxAggregatorFactory.java index 96f1af94f2d07..4fe936c8b7797 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/MaxAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/MaxAggregatorFactory.java @@ -90,4 +90,9 @@ protected Aggregator doCreateInternal( .getAggregator(MaxAggregationBuilder.REGISTRY_KEY, config) .build(name, config, searchContext, parent, metadata); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregatorFactory.java index 9776595d5a76d..3ef3c2afc7875 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregatorFactory.java @@ -95,4 +95,9 @@ protected Aggregator doCreateInternal( .getAggregator(MedianAbsoluteDeviationAggregationBuilder.REGISTRY_KEY, config) .build(name, config.getValuesSource(), config.format(), searchContext, parent, metadata, compression); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/MinAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/MinAggregatorFactory.java index b117f70c81baf..58fbe5edefd12 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/MinAggregatorFactory.java +++ 
b/server/src/main/java/org/opensearch/search/aggregations/metrics/MinAggregatorFactory.java @@ -90,4 +90,9 @@ protected Aggregator doCreateInternal( .getAggregator(MinAggregationBuilder.REGISTRY_KEY, config) .build(name, config, searchContext, parent, metadata); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/PercentileRanksAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/PercentileRanksAggregatorFactory.java index 19352d30a5177..d3c18bcad1a43 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/PercentileRanksAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/PercentileRanksAggregatorFactory.java @@ -111,4 +111,9 @@ protected Aggregator doCreateInternal( .getAggregator(PercentileRanksAggregationBuilder.REGISTRY_KEY, config) .build(name, config.getValuesSource(), searchContext, parent, percents, percentilesConfig, keyed, config.format(), metadata); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/PercentilesAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/PercentilesAggregatorFactory.java index e249863e25313..148e26e038923 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/PercentilesAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/PercentilesAggregatorFactory.java @@ -103,4 +103,9 @@ protected Aggregator doCreateInternal( .getAggregator(PercentilesAggregationBuilder.REGISTRY_KEY, config) .build(name, config.getValuesSource(), searchContext, parent, percents, percentilesConfig, keyed, config.format(), metadata); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/ScriptedMetricAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/ScriptedMetricAggregatorFactory.java index 5c831d60f75a8..58ef54ed64482 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/ScriptedMetricAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/ScriptedMetricAggregatorFactory.java @@ -124,6 +124,11 @@ public Aggregator createInternal( ); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } + private static Script deepCopyScript(Script script, SearchContext context, Map<String, Object> aggParams) { if (script != null) { Map<String, Object> params = mergeParams(aggParams, deepCopyParams(script.getParams(), context)); diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/StatsAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/StatsAggregatorFactory.java index 0c10df174efa0..0e96e631044dd 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/StatsAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/StatsAggregatorFactory.java @@ -90,4 +90,9 @@ protected Aggregator doCreateInternal( .getAggregator(StatsAggregationBuilder.REGISTRY_KEY, config) .build(name, config, searchContext, parent, metadata); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git
a/server/src/main/java/org/opensearch/search/aggregations/metrics/SumAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/SumAggregatorFactory.java index b3506ff958833..ef9b93920ba18 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/SumAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/SumAggregatorFactory.java @@ -90,4 +90,9 @@ protected Aggregator doCreateInternal( .getAggregator(SumAggregationBuilder.REGISTRY_KEY, config) .build(name, config, searchContext, parent, metadata); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/TopHitsAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/TopHitsAggregatorFactory.java index e312983cd6d24..ba371327c6893 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/TopHitsAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/TopHitsAggregatorFactory.java @@ -155,4 +155,8 @@ public Aggregator createInternal( return new TopHitsAggregator(searchContext.fetchPhase(), subSearchContext, name, searchContext, parent, metadata); } + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/ValueCountAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/ValueCountAggregatorFactory.java index feed42e911856..4a04dd2e0a932 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/ValueCountAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/ValueCountAggregatorFactory.java @@ -84,4 +84,9 @@ protected Aggregator doCreateInternal( .getAggregator(ValueCountAggregationBuilder.REGISTRY_KEY, config) .build(name, config, searchContext, parent, metadata); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/WeightedAvgAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/WeightedAvgAggregatorFactory.java index 9a27e9801d5fe..111245cae99e5 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/WeightedAvgAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/WeightedAvgAggregatorFactory.java @@ -95,4 +95,9 @@ protected Aggregator doCreateInternal( public String getStatsSubtype() { return configs.get(VALUE_FIELD.getPreferredName()).valueSourceType().typeName(); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/internal/FilteredSearchContext.java b/server/src/main/java/org/opensearch/search/internal/FilteredSearchContext.java index 5cd25d3b71704..32de5fc9864ce 100644 --- a/server/src/main/java/org/opensearch/search/internal/FilteredSearchContext.java +++ b/server/src/main/java/org/opensearch/search/internal/FilteredSearchContext.java @@ -561,8 +561,8 @@ public BucketCollectorProcessor bucketCollectorProcessor() { } @Override - public boolean isConcurrentSegmentSearchEnabled() { - return in.isConcurrentSegmentSearchEnabled(); + public boolean shouldUseConcurrentSearch() { + return in.shouldUseConcurrentSearch(); } @Override diff --git 
a/server/src/main/java/org/opensearch/search/internal/SearchContext.java b/server/src/main/java/org/opensearch/search/internal/SearchContext.java index 3320462727fce..590ce4b077cbc 100644 --- a/server/src/main/java/org/opensearch/search/internal/SearchContext.java +++ b/server/src/main/java/org/opensearch/search/internal/SearchContext.java @@ -399,7 +399,7 @@ public final void assignRescoreDocIds(RescoreDocIds rescoreDocIds) { /** * Returns concurrent segment search status for the search context */ - public boolean isConcurrentSegmentSearchEnabled() { + public boolean shouldUseConcurrentSearch() { return false; } @@ -407,7 +407,7 @@ public boolean isConcurrentSegmentSearchEnabled() { * Returns local bucket count thresholds based on concurrent segment search status */ public LocalBucketCountThresholds asLocalBucketCountThresholds(TermsAggregator.BucketCountThresholds bucketCountThresholds) { - if (isConcurrentSegmentSearchEnabled()) { + if (shouldUseConcurrentSearch()) { return new LocalBucketCountThresholds(0, ArrayUtil.MAX_ARRAY_LENGTH - 1); } else { return new LocalBucketCountThresholds(bucketCountThresholds.getShardMinDocCount(), bucketCountThresholds.getShardSize()); diff --git a/server/src/main/java/org/opensearch/search/query/QueryPhaseSearcherWrapper.java b/server/src/main/java/org/opensearch/search/query/QueryPhaseSearcherWrapper.java index 9336b490a5333..115f7503631c1 100644 --- a/server/src/main/java/org/opensearch/search/query/QueryPhaseSearcherWrapper.java +++ b/server/src/main/java/org/opensearch/search/query/QueryPhaseSearcherWrapper.java @@ -57,7 +57,7 @@ public boolean searchWith( boolean hasFilterCollector, boolean hasTimeout ) throws IOException { - if (searchContext.isConcurrentSegmentSearchEnabled()) { + if (searchContext.shouldUseConcurrentSearch()) { LOGGER.info("Using concurrent search over segments (experimental)"); return concurrentQueryPhaseSearcher.searchWith(searchContext, searcher, query, collectors, hasFilterCollector, hasTimeout); } else { @@ -72,7 +72,7 @@ public boolean searchWith( */ @Override public AggregationProcessor aggregationProcessor(SearchContext searchContext) { - if (searchContext.isConcurrentSegmentSearchEnabled()) { + if (searchContext.shouldUseConcurrentSearch()) { LOGGER.info("Using concurrent search over segments (experimental)"); return concurrentQueryPhaseSearcher.aggregationProcessor(searchContext); } else { diff --git a/server/src/test/java/org/opensearch/search/SearchServiceTests.java b/server/src/test/java/org/opensearch/search/SearchServiceTests.java index 4259ca9750ac7..dc0eb62b9f0e5 100644 --- a/server/src/test/java/org/opensearch/search/SearchServiceTests.java +++ b/server/src/test/java/org/opensearch/search/SearchServiceTests.java @@ -1274,7 +1274,8 @@ public void testConcurrentSegmentSearchSearchContext() throws IOException { .get() .getSetting(index, IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey()) ); - assertEquals(concurrentSearchEnabled, searchContext.isConcurrentSegmentSearchEnabled()); + searchContext.evaluateRequestShouldUseConcurrentSearch(); + assertEquals(concurrentSearchEnabled, searchContext.shouldUseConcurrentSearch()); // verify executor nullability with concurrent search enabled/disabled if (concurrentSearchEnabled) { assertNotNull(searchContext.searcher().getExecutor()); @@ -1328,7 +1329,8 @@ public void testConcurrentSegmentSearchIsSetOnceDuringContextCreation() throws I .get(); try (DefaultSearchContext searchContext = service.createSearchContext(request, new 
TimeValue(System.currentTimeMillis()))) { // verify concurrent search state in context - assertEquals(concurrentSearchSetting, searchContext.isConcurrentSegmentSearchEnabled()); + searchContext.evaluateRequestShouldUseConcurrentSearch(); + assertEquals(concurrentSearchSetting, searchContext.shouldUseConcurrentSearch()); // verify executor state in searcher assertEquals(concurrentSearchSetting, (searchContext.searcher().getExecutor() != null)); @@ -1342,7 +1344,7 @@ public void testConcurrentSegmentSearchIsSetOnceDuringContextCreation() throws I .get(); // verify that concurrent segment search is still set to same expected value for the context - assertEquals(concurrentSearchSetting, searchContext.isConcurrentSegmentSearchEnabled()); + assertEquals(concurrentSearchSetting, searchContext.shouldUseConcurrentSearch()); } } diff --git a/server/src/test/java/org/opensearch/search/internal/ContextIndexSearcherTests.java b/server/src/test/java/org/opensearch/search/internal/ContextIndexSearcherTests.java index f8f1ff09a314c..b1f70dfce176c 100644 --- a/server/src/test/java/org/opensearch/search/internal/ContextIndexSearcherTests.java +++ b/server/src/test/java/org/opensearch/search/internal/ContextIndexSearcherTests.java @@ -383,7 +383,7 @@ public void testGetSlicesWithNonNullExecutorButCSDisabled() throws Exception { IndexShard indexShard = mock(IndexShard.class); when(searchContext.indexShard()).thenReturn(indexShard); when(searchContext.bucketCollectorProcessor()).thenReturn(SearchContext.NO_OP_BUCKET_COLLECTOR_PROCESSOR); - when(searchContext.isConcurrentSegmentSearchEnabled()).thenReturn(false); + when(searchContext.shouldUseConcurrentSearch()).thenReturn(false); ContextIndexSearcher searcher = new ContextIndexSearcher( directoryReader, IndexSearcher.getDefaultSimilarity(), @@ -406,7 +406,7 @@ public void testGetSlicesWithNonNullExecutorButCSDisabled() throws Exception { mock(ExecutorService.class), searchContext ); - when(searchContext.isConcurrentSegmentSearchEnabled()).thenReturn(true); + when(searchContext.shouldUseConcurrentSearch()).thenReturn(true); when(searchContext.getTargetMaxSliceCount()).thenReturn(4); int expectedSliceCount = 4; IndexSearcher.LeafSlice[] slices = searcher.slices(leaves); diff --git a/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java b/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java index d773962987682..39126a607f968 100644 --- a/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java +++ b/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java @@ -1209,7 +1209,7 @@ private static ContextIndexSearcher newContextSearcher(IndexReader reader, Execu IndexShard indexShard = mock(IndexShard.class); when(searchContext.indexShard()).thenReturn(indexShard); when(searchContext.bucketCollectorProcessor()).thenReturn(SearchContext.NO_OP_BUCKET_COLLECTOR_PROCESSOR); - when(searchContext.isConcurrentSegmentSearchEnabled()).thenReturn(executor != null); + when(searchContext.shouldUseConcurrentSearch()).thenReturn(executor != null); if (executor != null) { when(searchContext.getTargetMaxSliceCount()).thenReturn(randomIntBetween(0, 2)); } else { @@ -1232,7 +1232,7 @@ private static ContextIndexSearcher newEarlyTerminationContextSearcher(IndexRead IndexShard indexShard = mock(IndexShard.class); when(searchContext.indexShard()).thenReturn(indexShard); when(searchContext.bucketCollectorProcessor()).thenReturn(SearchContext.NO_OP_BUCKET_COLLECTOR_PROCESSOR); - 
when(searchContext.isConcurrentSegmentSearchEnabled()).thenReturn(executor != null); + when(searchContext.shouldUseConcurrentSearch()).thenReturn(executor != null); if (executor != null) { when(searchContext.getTargetMaxSliceCount()).thenReturn(randomIntBetween(0, 2)); } else { diff --git a/test/external-modules/delayed-aggs/src/main/java/org/opensearch/search/aggregations/DelayedShardAggregationBuilder.java b/test/external-modules/delayed-aggs/src/main/java/org/opensearch/search/aggregations/DelayedShardAggregationBuilder.java index 02952eb7390dc..006632ca93925 100644 --- a/test/external-modules/delayed-aggs/src/main/java/org/opensearch/search/aggregations/DelayedShardAggregationBuilder.java +++ b/test/external-modules/delayed-aggs/src/main/java/org/opensearch/search/aggregations/DelayedShardAggregationBuilder.java @@ -137,6 +137,11 @@ protected Aggregator createInternal( } while (searchContext.getRelativeTimeInMillis() - start < delay.getMillis()); return factory.create(searchContext, parent, cardinality); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } }; } diff --git a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java index 3a6147850f090..5c649f1dc832d 100644 --- a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java @@ -1059,6 +1059,11 @@ public InternalAggregation buildEmptyAggregation() { } }; } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } }; } diff --git a/test/framework/src/main/java/org/opensearch/test/TestSearchContext.java b/test/framework/src/main/java/org/opensearch/test/TestSearchContext.java index a26488feed6fd..dd4a05b67271c 100644 --- a/test/framework/src/main/java/org/opensearch/test/TestSearchContext.java +++ b/test/framework/src/main/java/org/opensearch/test/TestSearchContext.java @@ -632,7 +632,7 @@ public Profilers getProfilers() { * Returns concurrent segment search status for the search context */ @Override - public boolean isConcurrentSegmentSearchEnabled() { + public boolean shouldUseConcurrentSearch() { return concurrentSegmentSearchEnabled; } From b76b468999e0379df2b20e0a9a34d645daa8d25b Mon Sep 17 00:00:00 2001 From: Ankit Kala Date: Fri, 25 Aug 2023 18:47:10 +0530 Subject: [PATCH 25/30] Move the changelog entry to correct section (#9543) Signed-off-by: Ankit Kala --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 350724bb21862..91c2e97dae6ef 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -72,7 +72,6 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Fixed - Fix 'org.apache.hc.core5.http.ParseException: Invalid protocol version' under JDK 16+ ([#4827](https://github.com/opensearch-project/OpenSearch/pull/4827)) - Fix compression support for h2c protocol ([#4944](https://github.com/opensearch-project/OpenSearch/pull/4944)) -- Add support to clear archived index setting ([#9019](https://github.com/opensearch-project/OpenSearch/pull/9019)) ### Security @@ -168,6 +167,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix range reads in respository-s3 ([9512](https://github.com/opensearch-project/OpenSearch/issues/9512)) - Handle null partSize in OnDemandBlockSnapshotIndexInput 
([#9291](https://github.com/opensearch-project/OpenSearch/issues/9291)) - Fix condition to remove index create block ([#9437](https://github.com/opensearch-project/OpenSearch/pull/9437)) +- Add support to clear archived index setting ([#9019](https://github.com/opensearch-project/OpenSearch/pull/9019)) ### Security From 5bbac1461e9e96ac019b8674d37452c4aad1d0e8 Mon Sep 17 00:00:00 2001 From: Ankit Kala Date: Fri, 25 Aug 2023 19:05:59 +0530 Subject: [PATCH 26/30] Fix flaky test ArchivedIndexSettingsIT (#9515) * Fix flaky test ArchivedIndexSettingsIT Signed-off-by: Ankit Kala --- .../settings/ArchivedIndexSettingsIT.java | 30 +++++++++++++++---- 1 file changed, 24 insertions(+), 6 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/settings/ArchivedIndexSettingsIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/settings/ArchivedIndexSettingsIT.java index 20b0a6175c562..8dc343abf8da2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/settings/ArchivedIndexSettingsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/settings/ArchivedIndexSettingsIT.java @@ -18,7 +18,9 @@ import java.util.Collection; import java.util.Collections; import java.util.List; +import java.util.concurrent.TimeUnit; +import static org.opensearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; import static org.hamcrest.Matchers.startsWith; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0, supportsDedicatedMasters = false) @@ -48,12 +50,28 @@ public void testArchiveSettings() throws Exception { internalCluster().restartNode(newClusterManagerNode); // Verify that archived settings exists. - assertTrue( - client().admin().indices().prepareGetSettings("test").get().getIndexToSettings().get("test").hasValue("archived.index.dummy") - ); - assertTrue( - client().admin().indices().prepareGetSettings("test").get().getIndexToSettings().get("test").hasValue("archived.index.dummy2") - ); + assertBusy(() -> { + // Verify that cluster state is in recovered state. + assertFalse(client().admin().cluster().prepareState().get().getState().blocks().hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK)); + assertTrue( + client().admin() + .indices() + .prepareGetSettings("test") + .get() + .getIndexToSettings() + .get("test") + .hasValue("archived.index.dummy") + ); + assertTrue( + client().admin() + .indices() + .prepareGetSettings("test") + .get() + .getIndexToSettings() + .get("test") + .hasValue("archived.index.dummy2") + ); + }, 30, TimeUnit.SECONDS); // Archived setting update should fail on open index. 
IllegalArgumentException exception = expectThrows( From 334b15a0910ad7d0e3e0b3defc30cd037ca5a5e1 Mon Sep 17 00:00:00 2001 From: Bukhtawar Khan Date: Fri, 25 Aug 2023 19:39:01 +0530 Subject: [PATCH 27/30] Rate Limiter integration for remote transfer (#9448) * Rate Limiter integration for remote transfer; introduces repository settings to rate limit remote store uploads and downloads Signed-off-by: Bukhtawar Khan --- CHANGELOG.md | 1 + .../opensearch/remotestore/RemoteStoreIT.java | 2 +- .../remotestore/RemoteStoreRestoreIT.java | 41 +++++ .../multipart/RemoteStoreMultipartIT.java | 46 ++++++ .../org/opensearch/common/StreamLimiter.java | 56 +++++++ .../RateLimitingOffsetRangeInputStream.java | 83 +++++++++++ .../blobstore/RateLimitingInputStream.java | 39 +---- .../index/store/RemoteDirectory.java | 141 +++++++++++++++++- .../store/RemoteSegmentStoreDirectory.java | 128 ++-------------- .../RemoteSegmentStoreDirectoryFactory.java | 20 ++- .../repositories/FilterRepository.java | 10 ++ .../opensearch/repositories/Repository.java | 10 ++ .../blobstore/BlobStoreRepository.java | 105 ++++++++++++- ...teLimitingOffsetRangeInputStreamTests.java | 46 ++++++ .../index/store/RemoteDirectoryTests.java | 89 +++++++++++ .../RemoteSegmentStoreDirectoryTests.java | 12 +- .../RepositoriesServiceTests.java | 10 ++ .../index/shard/RestoreOnlyRepository.java | 10 ++ 18 files changed, 686 insertions(+), 163 deletions(-) create mode 100644 server/src/main/java/org/opensearch/common/StreamLimiter.java create mode 100644 server/src/main/java/org/opensearch/common/blobstore/transfer/stream/RateLimitingOffsetRangeInputStream.java create mode 100644 server/src/test/java/org/opensearch/common/blobstore/transfer/stream/RateLimitingOffsetRangeInputStreamTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 91c2e97dae6ef..1d906fdf3a416 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -155,6 +155,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix sort related ITs for concurrent search ([#9177](https://github.com/opensearch-project/OpenSearch/pull/9466) - Removing the vec file extension from INDEX_STORE_HYBRID_NIO_EXTENSIONS, to ensure the no performance degradation for vector search via Lucene Engine.([#9528](https://github.com/opensearch-project/OpenSearch/pull/9528))) - Separate request-based and settings-based concurrent segment search controls and introduce AggregatorFactory method to determine concurrent search support ([#9469](https://github.com/opensearch-project/OpenSearch/pull/9469)) +- [Remote Store] Rate limiter integration for remote store uploads and downloads ([#9448](https://github.com/opensearch-project/OpenSearch/pull/9448/)) ### Deprecated diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java index 87ec515ffe740..9a2948861e967 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java @@ -38,7 +38,7 @@ @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE, numDataNodes = 0) public class RemoteStoreIT extends RemoteStoreBaseIntegTestCase { - private static final String INDEX_NAME = "remote-store-test-idx-1"; + protected final String INDEX_NAME = "remote-store-test-idx-1"; @Override protected Collection<Class<? extends Plugin>> nodePlugins() { diff --git
a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java index e9d8933961073..60d7eefbb6d9b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java @@ -14,7 +14,10 @@ import org.opensearch.action.support.PlainActionFuture; import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.plugins.Plugin; +import org.opensearch.repositories.RepositoriesService; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.transport.MockTransportService; @@ -26,9 +29,11 @@ import java.util.HashMap; import java.util.Map; import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; +import static org.hamcrest.Matchers.greaterThan; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE, numDataNodes = 0) public class RemoteStoreRestoreIT extends RemoteStoreBaseIntegTestCase { @@ -450,5 +455,41 @@ public void testRTSRestoreDataOnlyInTranslog() throws IOException { testRestoreFlow(0, true, randomIntBetween(1, 5)); } + public void testRateLimitedRemoteDownloads() throws Exception { + assertAcked( + client().admin() + .cluster() + .preparePutRepository(REPOSITORY_NAME) + .setType("fs") + .setSettings( + Settings.builder() + .put("location", randomRepoPath()) + .put("compress", randomBoolean()) + .put("max_remote_download_bytes_per_sec", "2kb") + .put("chunk_size", 200, ByteSizeUnit.BYTES) + + ) + ); + int shardCount = randomIntBetween(1, 3); + prepareCluster(0, 3, INDEX_NAME, 0, shardCount); + Map<String, Long> indexStats = indexData(5, false, INDEX_NAME); + assertEquals(shardCount, getNumShards(INDEX_NAME).totalNumShards); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNodeName(INDEX_NAME))); + ensureRed(INDEX_NAME); + restore(INDEX_NAME); + assertBusy(() -> { + long downloadPauseTime = 0L; + for (RepositoriesService repositoriesService : internalCluster().getDataNodeInstances(RepositoriesService.class)) { + downloadPauseTime += repositoriesService.repository(REPOSITORY_NAME).getRemoteDownloadThrottleTimeInNanos(); + } + assertThat(downloadPauseTime, greaterThan(TimeValue.timeValueSeconds(randomIntBetween(5, 10)).nanos())); + }, 30, TimeUnit.SECONDS); + ensureGreen(INDEX_NAME); + // This is required to get updated number from already active shards which were not restored + assertEquals(shardCount, getNumShards(INDEX_NAME).totalNumShards); + assertEquals(0, getNumShards(INDEX_NAME).numReplicas); + verifyRestoredData(indexStats, INDEX_NAME); + } + // TODO: Restore flow - index aliases } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartIT.java index a523d5c0f5470..842a576a92a38 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartIT.java +++
b/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartIT.java @@ -8,17 +8,24 @@ package org.opensearch.remotestore.multipart; +import org.opensearch.client.Client; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.plugins.Plugin; import org.opensearch.remotestore.RemoteStoreIT; import org.opensearch.remotestore.multipart.mocks.MockFsRepositoryPlugin; +import org.opensearch.repositories.RepositoriesService; import java.nio.file.Path; import java.util.Collection; +import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import java.util.stream.Stream; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; public class RemoteStoreMultipartIT extends RemoteStoreIT { @@ -35,4 +42,43 @@ protected void putRepository(Path path) { .setSettings(Settings.builder().put("location", path)) ); } + + public void testRateLimitedRemoteUploads() throws Exception { + internalCluster().startDataOnlyNodes(1); + Client client = client(); + logger.info("--> updating repository"); + Path repositoryLocation = randomRepoPath(); + assertAcked( + client.admin() + .cluster() + .preparePutRepository(REPOSITORY_NAME) + .setType(MockFsRepositoryPlugin.TYPE) + .setSettings( + Settings.builder() + .put("location", repositoryLocation) + .put("compress", randomBoolean()) + .put("max_remote_upload_bytes_per_sec", "1kb") + .put("chunk_size", 100, ByteSizeUnit.BYTES) + ) + ); + + createIndex(INDEX_NAME, remoteStoreIndexSettings(0)); + ensureGreen(); + + logger.info("--> indexing some data"); + for (int i = 0; i < 10; i++) { + index(INDEX_NAME, "_doc", Integer.toString(i), "foo", "bar" + i); + } + refresh(); + // check if throttling is active + assertBusy(() -> { + long uploadPauseTime = 0L; + for (RepositoriesService repositoriesService : internalCluster().getDataNodeInstances(RepositoriesService.class)) { + uploadPauseTime += repositoriesService.repository(REPOSITORY_NAME).getRemoteUploadThrottleTimeInNanos(); + } + assertThat(uploadPauseTime, greaterThan(TimeValue.timeValueSeconds(randomIntBetween(5, 10)).nanos())); + }, 30, TimeUnit.SECONDS); + + assertThat(client.prepareSearch(INDEX_NAME).setSize(0).get().getHits().getTotalHits().value, equalTo(10L)); + } } diff --git a/server/src/main/java/org/opensearch/common/StreamLimiter.java b/server/src/main/java/org/opensearch/common/StreamLimiter.java new file mode 100644 index 0000000000000..ec203a1c30868 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/StreamLimiter.java @@ -0,0 +1,56 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common; + +import org.apache.lucene.store.RateLimiter; + +import java.io.IOException; +import java.util.function.Supplier; + +/** + * The stream limiter that limits the transfer of bytes + * + * @opensearch.internal + */ +public class StreamLimiter { + + private final Supplier<RateLimiter> rateLimiterSupplier; + + private final StreamLimiter.Listener listener; + + private int bytesSinceLastRateLimit; + + public StreamLimiter(Supplier<RateLimiter> rateLimiterSupplier, Listener listener) { + this.rateLimiterSupplier = rateLimiterSupplier; + this.listener = listener; + } + + public void maybePause(int bytes) throws IOException { + bytesSinceLastRateLimit += bytes; + final RateLimiter rateLimiter = rateLimiterSupplier.get(); + if (rateLimiter != null) { + if (bytesSinceLastRateLimit >= rateLimiter.getMinPauseCheckBytes()) { + long pause = rateLimiter.pause(bytesSinceLastRateLimit); + bytesSinceLastRateLimit = 0; + if (pause > 0) { + listener.onPause(pause); + } + } + } + } + + /** + * Internal listener + * + * @opensearch.internal + */ + public interface Listener { + void onPause(long nanos); + } +} diff --git a/server/src/main/java/org/opensearch/common/blobstore/transfer/stream/RateLimitingOffsetRangeInputStream.java b/server/src/main/java/org/opensearch/common/blobstore/transfer/stream/RateLimitingOffsetRangeInputStream.java new file mode 100644 index 0000000000000..b455999bbed0c --- /dev/null +++ b/server/src/main/java/org/opensearch/common/blobstore/transfer/stream/RateLimitingOffsetRangeInputStream.java @@ -0,0 +1,83 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.blobstore.transfer.stream; + +import org.apache.lucene.store.RateLimiter; +import org.opensearch.common.StreamLimiter; + +import java.io.IOException; +import java.util.function.Supplier; + +/** + * Rate Limits an {@link OffsetRangeInputStream} + * + * @opensearch.internal + */ +public class RateLimitingOffsetRangeInputStream extends OffsetRangeInputStream { + + private final StreamLimiter streamLimiter; + + private final OffsetRangeInputStream delegate; + + /** + * The ctor for RateLimitingOffsetRangeInputStream + * @param delegate the underlying {@link OffsetRangeInputStream} + * @param rateLimiterSupplier the supplier for {@link RateLimiter} + * @param listener the listener to be invoked on rate limits + */ + public RateLimitingOffsetRangeInputStream( + OffsetRangeInputStream delegate, + Supplier<RateLimiter> rateLimiterSupplier, + StreamLimiter.Listener listener + ) { + this.streamLimiter = new StreamLimiter(rateLimiterSupplier, listener); + this.delegate = delegate; + } + + @Override + public int read() throws IOException { + int b = delegate.read(); + streamLimiter.maybePause(1); + return b; + } + + @Override + public int read(byte[] b, int off, int len) throws IOException { + int n = delegate.read(b, off, len); + if (n > 0) { + streamLimiter.maybePause(n); + } + return n; + } + + @Override + public synchronized void mark(int readlimit) { + delegate.mark(readlimit); + } + + @Override + public boolean markSupported() { + return delegate.markSupported(); + } + + @Override + public long getFilePointer() throws IOException { + return delegate.getFilePointer(); + } + + @Override + public synchronized void reset() throws IOException { + delegate.reset(); + } + + @Override + public void close() throws IOException { + delegate.close(); + } +}
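
Taken together, the two new classes above form a small decorator stack: StreamLimiter owns the pause bookkeeping against a live RateLimiter, while RateLimitingOffsetRangeInputStream applies it to the multi-stream upload path. A minimal wiring sketch, assuming a caller that already holds an OffsetRangeInputStream named source; the SimpleRateLimiter rate and the AtomicLong counter are illustrative stand-ins, not part of this patch:

    // Throttle reads to roughly 1 MB/s and accumulate the time spent paused.
    RateLimiter rateLimiter = new RateLimiter.SimpleRateLimiter(1.0); // MB per second
    AtomicLong throttledNanos = new AtomicLong();                     // stands in for a CounterMetric

    OffsetRangeInputStream throttled = new RateLimitingOffsetRangeInputStream(
        source,                   // the underlying OffsetRangeInputStream, e.g. over a segment file
        () -> rateLimiter,        // a supplier, so a repository settings update can swap the limiter live
        throttledNanos::addAndGet // StreamLimiter.Listener receives each pause in nanos
    );

BlobStoreRepository performs the same wiring with its remote upload rate limiter and a CounterMetric later in this patch.
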
diff --git a/server/src/main/java/org/opensearch/index/snapshots/blobstore/RateLimitingInputStream.java b/server/src/main/java/org/opensearch/index/snapshots/blobstore/RateLimitingInputStream.java index 86ecef1173e48..ee601f96ecee1 100644 --- a/server/src/main/java/org/opensearch/index/snapshots/blobstore/RateLimitingInputStream.java +++ b/server/src/main/java/org/opensearch/index/snapshots/blobstore/RateLimitingInputStream.java @@ -33,6 +33,7 @@ package org.opensearch.index.snapshots.blobstore; import org.apache.lucene.store.RateLimiter; +import org.opensearch.common.StreamLimiter; import java.io.FilterInputStream; import java.io.IOException; @@ -46,45 +47,17 @@ */ public class RateLimitingInputStream extends FilterInputStream { - private final Supplier<RateLimiter> rateLimiterSupplier; + private final StreamLimiter streamLimiter; - private final Listener listener; - - private long bytesSinceLastRateLimit; - - /** - * Internal listener - * - * @opensearch.internal - */ - public interface Listener { - void onPause(long nanos); - } - - public RateLimitingInputStream(InputStream delegate, Supplier<RateLimiter> rateLimiterSupplier, Listener listener) { + public RateLimitingInputStream(InputStream delegate, Supplier<RateLimiter> rateLimiterSupplier, StreamLimiter.Listener listener) { super(delegate); - this.rateLimiterSupplier = rateLimiterSupplier; - this.listener = listener; - } - - private void maybePause(int bytes) throws IOException { - bytesSinceLastRateLimit += bytes; - final RateLimiter rateLimiter = rateLimiterSupplier.get(); - if (rateLimiter != null) { - if (bytesSinceLastRateLimit >= rateLimiter.getMinPauseCheckBytes()) { - long pause = rateLimiter.pause(bytesSinceLastRateLimit); - bytesSinceLastRateLimit = 0; - if (pause > 0) { - listener.onPause(pause); - } - } - } + this.streamLimiter = new StreamLimiter(rateLimiterSupplier, listener); } @Override public int read() throws IOException { int b = super.read(); - maybePause(1); + streamLimiter.maybePause(1); return b; } @@ -92,7 +65,7 @@ public int read() throws IOException { public int read(byte[] b, int off, int len) throws IOException { int n = super.read(b, off, len); if (n > 0) { - maybePause(n); + streamLimiter.maybePause(n); } return n; } diff --git a/server/src/main/java/org/opensearch/index/store/RemoteDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteDirectory.java index a5e02a5baed69..04b5d7eb7c6bd 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteDirectory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteDirectory.java @@ -8,15 +8,30 @@ package org.opensearch.index.store; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.lucene.codecs.CodecUtil; +import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.IndexOutput; import org.apache.lucene.store.Lock; +import org.opensearch.ExceptionsHelper; import org.opensearch.action.LatchedActionListener; import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobMetadata; +import org.opensearch.common.blobstore.VerifyingMultiStreamBlobContainer; +import org.opensearch.common.blobstore.exception.CorruptFileException; +import org.opensearch.common.blobstore.stream.write.WriteContext; +import org.opensearch.common.blobstore.stream.write.WritePriority; +import org.opensearch.common.blobstore.transfer.RemoteTransferContainer;
+import org.opensearch.common.blobstore.transfer.stream.OffsetRangeIndexInputStream; +import org.opensearch.common.blobstore.transfer.stream.OffsetRangeInputStream; +import org.opensearch.common.util.ByteUtils; import org.opensearch.core.action.ActionListener; +import org.opensearch.index.store.exception.ChecksumCombinationException; import java.io.FileNotFoundException; import java.io.IOException; @@ -30,7 +45,11 @@ import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.UnaryOperator; import java.util.stream.Collectors; +import java.util.zip.CRC32; + +import com.jcraft.jzlib.JZlib; /** * A {@code RemoteDirectory} provides an abstraction layer for storing a list of files to a remote store. @@ -45,12 +64,33 @@ public class RemoteDirectory extends Directory { protected final BlobContainer blobContainer; + protected final UnaryOperator<OffsetRangeInputStream> uploadRateLimiter; + + protected final UnaryOperator<InputStream> downloadRateLimiter; + + /** + * Number of bytes in the segment file to store checksum + */ + private static final int SEGMENT_CHECKSUM_BYTES = 8; + + private static final Logger logger = LogManager.getLogger(RemoteDirectory.class); + public BlobContainer getBlobContainer() { return blobContainer; } public RemoteDirectory(BlobContainer blobContainer) { + this(blobContainer, UnaryOperator.identity(), UnaryOperator.identity()); + } + + public RemoteDirectory( + BlobContainer blobContainer, + UnaryOperator<OffsetRangeInputStream> uploadRateLimiter, + UnaryOperator<InputStream> downloadRateLimiter + ) { this.blobContainer = blobContainer; + this.uploadRateLimiter = uploadRateLimiter; + this.downloadRateLimiter = downloadRateLimiter; } /** @@ -149,7 +189,7 @@ public IndexInput openInput(String name, IOContext context) throws IOException { InputStream inputStream = null; try { inputStream = blobContainer.readBlob(name); - return new RemoteIndexInput(name, inputStream, fileLength(name)); + return new RemoteIndexInput(name, downloadRateLimiter.apply(inputStream), fileLength(name)); } catch (Exception e) { // Incase the RemoteIndexInput creation fails, close the input stream to avoid file handler leak.
if (inputStream != null) inputStream.close(); @@ -259,4 +299,103 @@ public Lock obtainLock(String name) throws IOException { public void delete() throws IOException { blobContainer.delete(); } + + public boolean copyFrom( + Directory from, + String src, + String remoteFileName, + IOContext context, + Runnable postUploadRunner, + ActionListener<Void> listener + ) { + if (blobContainer instanceof VerifyingMultiStreamBlobContainer) { + try { + uploadBlob(from, src, remoteFileName, context, postUploadRunner, listener); + } catch (Exception e) { + listener.onFailure(e); + } + return true; + } + return false; + } + + private void uploadBlob( + Directory from, + String src, + String remoteFileName, + IOContext ioContext, + Runnable postUploadRunner, + ActionListener<Void> listener + ) throws Exception { + long expectedChecksum = calculateChecksumOfChecksum(from, src); + long contentLength; + try (IndexInput indexInput = from.openInput(src, ioContext)) { + contentLength = indexInput.length(); + } + RemoteTransferContainer remoteTransferContainer = new RemoteTransferContainer( + src, + remoteFileName, + contentLength, + true, + WritePriority.NORMAL, + (size, position) -> uploadRateLimiter.apply(new OffsetRangeIndexInputStream(from.openInput(src, ioContext), size, position)), + expectedChecksum, + this.getBlobContainer() instanceof VerifyingMultiStreamBlobContainer + ); + ActionListener<Void> completionListener = ActionListener.wrap(resp -> { + try { + postUploadRunner.run(); + listener.onResponse(null); + } catch (Exception e) { + logger.error(() -> new ParameterizedMessage("Exception in segment postUpload for file [{}]", src), e); + listener.onFailure(e); + } }, ex -> { + logger.error(() -> new ParameterizedMessage("Failed to upload blob {}", src), ex); + IOException corruptIndexException = ExceptionsHelper.unwrapCorruption(ex); + if (corruptIndexException != null) { + listener.onFailure(corruptIndexException); + return; + } + Throwable throwable = ExceptionsHelper.unwrap(ex, CorruptFileException.class); + if (throwable != null) { + CorruptFileException corruptFileException = (CorruptFileException) throwable; + listener.onFailure(new CorruptIndexException(corruptFileException.getMessage(), corruptFileException.getFileName())); + return; + } + listener.onFailure(ex); + }); + + completionListener = ActionListener.runBefore(completionListener, () -> { + try { + remoteTransferContainer.close(); + } catch (Exception e) { + logger.warn("Error occurred while closing streams", e); + } + }); + + WriteContext writeContext = remoteTransferContainer.createWriteContext(); + ((VerifyingMultiStreamBlobContainer) blobContainer).asyncBlobUpload(writeContext, completionListener); + } + + private long calculateChecksumOfChecksum(Directory directory, String file) throws IOException { + try (IndexInput indexInput = directory.openInput(file, IOContext.DEFAULT)) { + long storedChecksum = CodecUtil.retrieveChecksum(indexInput); + CRC32 checksumOfChecksum = new CRC32(); + checksumOfChecksum.update(ByteUtils.toByteArrayBE(storedChecksum)); + try { + return JZlib.crc32_combine(storedChecksum, checksumOfChecksum.getValue(), SEGMENT_CHECKSUM_BYTES); + } catch (Exception e) { + throw new ChecksumCombinationException( + "Potentially corrupted file: Checksum combination failed while combining stored checksum " + + "and calculated checksum of stored checksum in segment file: " + + file + + ", directory: " + + directory, + file, + e + ); + } + } + } } diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java
b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java index b1077bef5b492..0f6ca2a61b67d 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java @@ -12,7 +12,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.codecs.CodecUtil; -import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.SegmentCommitInfo; import org.apache.lucene.index.SegmentInfo; import org.apache.lucene.index.SegmentInfos; @@ -24,20 +23,11 @@ import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.IndexOutput; import org.apache.lucene.util.Version; -import org.opensearch.ExceptionsHelper; import org.opensearch.common.UUIDs; -import org.opensearch.common.blobstore.VerifyingMultiStreamBlobContainer; -import org.opensearch.common.blobstore.exception.CorruptFileException; -import org.opensearch.common.blobstore.stream.write.WriteContext; -import org.opensearch.common.blobstore.stream.write.WritePriority; -import org.opensearch.common.blobstore.transfer.RemoteTransferContainer; -import org.opensearch.common.blobstore.transfer.stream.OffsetRangeIndexInputStream; import org.opensearch.common.io.VersionedCodecStreamWrapper; import org.opensearch.common.lucene.store.ByteArrayIndexInput; -import org.opensearch.common.util.ByteUtils; import org.opensearch.core.action.ActionListener; import org.opensearch.index.remote.RemoteStoreUtils; -import org.opensearch.index.store.exception.ChecksumCombinationException; import org.opensearch.index.store.lockmanager.FileLockInfo; import org.opensearch.index.store.lockmanager.RemoteStoreCommitLevelLockManager; import org.opensearch.index.store.lockmanager.RemoteStoreLockManager; @@ -60,9 +50,6 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import java.util.stream.Collectors; -import java.util.zip.CRC32; - -import com.jcraft.jzlib.JZlib; /** * A RemoteDirectory extension for remote segment store. We need to make sure we don't overwrite a segment file once uploaded. 
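
The class comment above captures the invariant driving this refactor: a segment file, once uploaded, is never overwritten. The directory upholds that by never reusing a remote name. Based on the SEGMENT_NAME_UUID_SEPARATOR ("__") and the UUIDs import retained below, the naming helper behaves roughly like this sketch (an illustration, not a verbatim excerpt of the file):

    // Every upload gets a fresh remote name such as "_0.cfe__<base64-uuid>",
    // so a retried or concurrent upload can never clobber an existing blob.
    private String getNewRemoteSegmentFilename(String localFilename) {
        return localFilename + SEGMENT_NAME_UUID_SEPARATOR + UUIDs.base64UUID();
    }
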
@@ -83,11 +70,6 @@ public final class RemoteSegmentStoreDirectory extends FilterDirectory implement */ public static final String SEGMENT_NAME_UUID_SEPARATOR = "__"; - /** - * Number of bytes in the segment file to store checksum - */ - private static final int SEGMENT_CHECKSUM_BYTES = 8; - /** * remoteDataDirectory is used to store segment files at path: cluster_UUID/index_UUID/shardId/segments/data */ @@ -433,77 +415,25 @@ public IndexInput openInput(String name, IOContext context) throws IOException { * @param listener Listener to handle upload callback events */ public void copyFrom(Directory from, String src, IOContext context, ActionListener listener) { - if (remoteDataDirectory.getBlobContainer() instanceof VerifyingMultiStreamBlobContainer) { - try { - String remoteFilename = getNewRemoteSegmentFilename(src); - uploadBlob(from, src, remoteFilename, context, listener); - } catch (Exception e) { - listener.onFailure(e); - } - } else { - try { + try { + final String remoteFileName = getNewRemoteSegmentFilename(src); + boolean uploaded = remoteDataDirectory.copyFrom(from, src, remoteFileName, context, () -> { + try { + postUpload(from, src, remoteFileName, getChecksumOfLocalFile(from, src)); + } catch (IOException e) { + throw new RuntimeException("Exception in segment postUpload for file " + src, e); + } + }, listener); + if (uploaded == false) { copyFrom(from, src, src, context); listener.onResponse(null); - } catch (Exception e) { - logger.warn(() -> new ParameterizedMessage("Exception while uploading file {} to the remote segment store", src), e); - listener.onFailure(e); } + } catch (Exception e) { + logger.warn(() -> new ParameterizedMessage("Exception while uploading file {} to the remote segment store", src), e); + listener.onFailure(e); } } - private void uploadBlob(Directory from, String src, String remoteFileName, IOContext ioContext, ActionListener listener) - throws Exception { - long expectedChecksum = calculateChecksumOfChecksum(from, src); - long contentLength; - try (IndexInput indexInput = from.openInput(src, ioContext)) { - contentLength = indexInput.length(); - } - RemoteTransferContainer remoteTransferContainer = new RemoteTransferContainer( - src, - remoteFileName, - contentLength, - true, - WritePriority.NORMAL, - (size, position) -> new OffsetRangeIndexInputStream(from.openInput(src, ioContext), size, position), - expectedChecksum, - remoteDataDirectory.getBlobContainer() instanceof VerifyingMultiStreamBlobContainer - ); - ActionListener completionListener = ActionListener.wrap(resp -> { - try { - postUpload(from, src, remoteFileName, getChecksumOfLocalFile(from, src)); - listener.onResponse(null); - } catch (Exception e) { - logger.error(() -> new ParameterizedMessage("Exception in segment postUpload for file [{}]", src), e); - listener.onFailure(e); - } - }, ex -> { - logger.error(() -> new ParameterizedMessage("Failed to upload blob {}", src), ex); - IOException corruptIndexException = ExceptionsHelper.unwrapCorruption(ex); - if (corruptIndexException != null) { - listener.onFailure(corruptIndexException); - return; - } - Throwable throwable = ExceptionsHelper.unwrap(ex, CorruptFileException.class); - if (throwable != null) { - CorruptFileException corruptFileException = (CorruptFileException) throwable; - listener.onFailure(new CorruptIndexException(corruptFileException.getMessage(), corruptFileException.getFileName())); - return; - } - listener.onFailure(ex); - }); - - completionListener = ActionListener.runBefore(completionListener, () -> { - try { - 
remoteTransferContainer.close(); - } catch (Exception e) { - logger.warn("Error occurred while closing streams", e); - } - }); - - WriteContext writeContext = remoteTransferContainer.createWriteContext(); - ((VerifyingMultiStreamBlobContainer) remoteDataDirectory.getBlobContainer()).asyncBlobUpload(writeContext, completionListener); - } - /** * This acquires a lock on a given commit by creating a lock file in lock directory using {@code FileLockInfo} * @@ -579,13 +509,6 @@ String getMetadataFileForCommit(long primaryTerm, long generation) throws IOExce return metadataFiles.get(0); } - public void copyFrom(Directory from, String src, String dest, IOContext context, String checksum) throws IOException { - String remoteFilename; - remoteFilename = getNewRemoteSegmentFilename(dest); - remoteDataDirectory.copyFrom(from, src, remoteFilename, context); - postUpload(from, src, remoteFilename, checksum); - } - private void postUpload(Directory from, String src, String remoteFilename, String checksum) throws IOException { UploadedSegmentMetadata segmentMetadata = new UploadedSegmentMetadata(src, remoteFilename, checksum, from.fileLength(src)); segmentsUploadedToRemoteStore.put(src, segmentMetadata); @@ -597,7 +520,9 @@ private void postUpload(Directory from, String src, String remoteFilename, Strin */ @Override public void copyFrom(Directory from, String src, String dest, IOContext context) throws IOException { - copyFrom(from, src, dest, context, getChecksumOfLocalFile(from, src)); + String remoteFilename = getNewRemoteSegmentFilename(dest); + remoteDataDirectory.copyFrom(from, src, remoteFilename, context); + postUpload(from, src, remoteFilename, getChecksumOfLocalFile(from, src)); } /** @@ -731,27 +656,6 @@ private String getChecksumOfLocalFile(Directory directory, String file) throws I } } - private long calculateChecksumOfChecksum(Directory directory, String file) throws IOException { - try (IndexInput indexInput = directory.openInput(file, IOContext.DEFAULT)) { - long storedChecksum = CodecUtil.retrieveChecksum(indexInput); - CRC32 checksumOfChecksum = new CRC32(); - checksumOfChecksum.update(ByteUtils.toByteArrayBE(storedChecksum)); - try { - return JZlib.crc32_combine(storedChecksum, checksumOfChecksum.getValue(), SEGMENT_CHECKSUM_BYTES); - } catch (Exception e) { - throw new ChecksumCombinationException( - "Potentially corrupted file: Checksum combination failed while combining stored checksum " - + "and calculated checksum of stored checksum in segment file: " - + file - + ", directory: " - + directory, - file, - e - ); - } - } - } - private String getExistingRemoteFilename(String localFilename) { if (segmentsUploadedToRemoteStore.containsKey(localFilename)) { return segmentsUploadedToRemoteStore.get(localFilename).uploadedFilename; diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java index 3de7a706c0688..31b49f6813ad2 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java @@ -9,7 +9,6 @@ package org.opensearch.index.store; import org.apache.lucene.store.Directory; -import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.index.IndexSettings; import org.opensearch.index.shard.ShardPath; @@ -54,11 +53,18 @@ public Directory newDirectory(IndexSettings 
indexSettings, ShardPath path) throw public Directory newDirectory(String repositoryName, String indexUUID, String shardId) throws IOException { try (Repository repository = repositoriesService.get().repository(repositoryName)) { assert repository instanceof BlobStoreRepository : "repository should be instance of BlobStoreRepository"; - BlobPath commonBlobPath = ((BlobStoreRepository) repository).basePath(); + BlobStoreRepository blobStoreRepository = ((BlobStoreRepository) repository); + BlobPath commonBlobPath = blobStoreRepository.basePath(); commonBlobPath = commonBlobPath.add(indexUUID).add(shardId).add(SEGMENTS); - RemoteDirectory dataDirectory = createRemoteDirectory(repository, commonBlobPath, "data"); - RemoteDirectory metadataDirectory = createRemoteDirectory(repository, commonBlobPath, "metadata"); + RemoteDirectory dataDirectory = new RemoteDirectory( + blobStoreRepository.blobStore().blobContainer(commonBlobPath.add("data")), + blobStoreRepository::maybeRateLimitRemoteUploadTransfers, + blobStoreRepository::maybeRateLimitRemoteDownloadTransfers + ); + RemoteDirectory metadataDirectory = new RemoteDirectory( + blobStoreRepository.blobStore().blobContainer(commonBlobPath.add("metadata")) + ); RemoteStoreLockManager mdLockManager = RemoteStoreLockManagerFactory.newLockManager( repositoriesService.get(), repositoryName, @@ -72,9 +78,7 @@ public Directory newDirectory(String repositoryName, String indexUUID, String sh } } - private RemoteDirectory createRemoteDirectory(Repository repository, BlobPath commonBlobPath, String extention) { - BlobPath extendedPath = commonBlobPath.add(extention); - BlobContainer dataBlobContainer = ((BlobStoreRepository) repository).blobStore().blobContainer(extendedPath); - return new RemoteDirectory(dataBlobContainer); + private RemoteDirectory createRemoteDirectory(BlobStoreRepository repository, BlobPath commonBlobPath, String extension) { + return new RemoteDirectory(repository.blobStore().blobContainer(commonBlobPath.add(extension))); } } diff --git a/server/src/main/java/org/opensearch/repositories/FilterRepository.java b/server/src/main/java/org/opensearch/repositories/FilterRepository.java index 1aba9e25a0dc2..08f8bcb467d03 100644 --- a/server/src/main/java/org/opensearch/repositories/FilterRepository.java +++ b/server/src/main/java/org/opensearch/repositories/FilterRepository.java @@ -137,6 +137,16 @@ public long getRestoreThrottleTimeInNanos() { return in.getRestoreThrottleTimeInNanos(); } + @Override + public long getRemoteUploadThrottleTimeInNanos() { + return in.getRemoteUploadThrottleTimeInNanos(); + } + + @Override + public long getRemoteDownloadThrottleTimeInNanos() { + return in.getRemoteDownloadThrottleTimeInNanos(); + } + @Override public String startVerification() { return in.startVerification(); diff --git a/server/src/main/java/org/opensearch/repositories/Repository.java b/server/src/main/java/org/opensearch/repositories/Repository.java index 862a8de1e3218..76a3b65c9ea55 100644 --- a/server/src/main/java/org/opensearch/repositories/Repository.java +++ b/server/src/main/java/org/opensearch/repositories/Repository.java @@ -198,6 +198,16 @@ default void deleteSnapshotsAndReleaseLockFiles( */ long getRestoreThrottleTimeInNanos(); + /** + * Returns remote upload throttle time in nanoseconds + */ + long getRemoteUploadThrottleTimeInNanos(); + + /** + * Returns remote download throttle time in nanoseconds + */ + long getRemoteDownloadThrottleTimeInNanos(); + /** * Returns stats on the repository usage */ diff --git
a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java index 693022a60cc09..108a022a2612b 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java @@ -73,6 +73,8 @@ import org.opensearch.common.blobstore.BlobStore; import org.opensearch.common.blobstore.DeleteResult; import org.opensearch.common.blobstore.fs.FsBlobContainer; +import org.opensearch.common.blobstore.transfer.stream.OffsetRangeInputStream; +import org.opensearch.common.blobstore.transfer.stream.RateLimitingOffsetRangeInputStream; import org.opensearch.common.collect.Tuple; import org.opensearch.common.compress.DeflateCompressor; import org.opensearch.common.io.Streams; @@ -295,10 +297,18 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp private final RateLimiter restoreRateLimiter; + private final RateLimiter remoteUploadRateLimiter; + + private final RateLimiter remoteDownloadRateLimiter; + private final CounterMetric snapshotRateLimitingTimeInNanos = new CounterMetric(); private final CounterMetric restoreRateLimitingTimeInNanos = new CounterMetric(); + private final CounterMetric remoteDownloadRateLimitingTimeInNanos = new CounterMetric(); + + private final CounterMetric remoteUploadRateLimitingTimeInNanos = new CounterMetric(); + public static final ChecksumBlobStoreFormat GLOBAL_METADATA_FORMAT = new ChecksumBlobStoreFormat<>( "metadata", METADATA_NAME_FORMAT, @@ -398,6 +408,8 @@ protected BlobStoreRepository( this.supportURLRepo = SUPPORT_URL_REPO.get(metadata.settings()); snapshotRateLimiter = getRateLimiter(metadata.settings(), "max_snapshot_bytes_per_sec", new ByteSizeValue(40, ByteSizeUnit.MB)); restoreRateLimiter = getRateLimiter(metadata.settings(), "max_restore_bytes_per_sec", ByteSizeValue.ZERO); + remoteUploadRateLimiter = getRateLimiter(metadata.settings(), "max_remote_upload_bytes_per_sec", ByteSizeValue.ZERO); + remoteDownloadRateLimiter = getRateLimiter(metadata.settings(), "max_remote_download_bytes_per_sec", ByteSizeValue.ZERO); readOnly = READONLY_SETTING.get(metadata.settings()); cacheRepositoryData = CACHE_REPOSITORY_DATA.get(metadata.settings()); bufferSize = Math.toIntExact(BUFFER_SIZE_SETTING.get(metadata.settings()).getBytes()); @@ -1778,6 +1790,16 @@ public long getRestoreThrottleTimeInNanos() { return restoreRateLimitingTimeInNanos.count(); } + @Override + public long getRemoteUploadThrottleTimeInNanos() { + return remoteUploadRateLimitingTimeInNanos.count(); + } + + @Override + public long getRemoteDownloadThrottleTimeInNanos() { + return remoteDownloadRateLimitingTimeInNanos.count(); + } + protected void assertSnapshotOrGenericThread() { assert Thread.currentThread().getName().contains('[' + ThreadPool.Names.SNAPSHOT + ']') || Thread.currentThread().getName().contains('[' + ThreadPool.Names.GENERIC + ']') : "Expected current thread [" @@ -3005,20 +3027,75 @@ private static ActionListener fileQueueListener( }); } - private static InputStream maybeRateLimit(InputStream stream, Supplier rateLimiterSupplier, CounterMetric metric) { - return new RateLimitingInputStream(stream, rateLimiterSupplier, metric::inc); + private static void mayBeLogRateLimits(BlobStoreTransferContext context, RateLimiter rateLimiter, long time) { + logger.debug( + () -> new ParameterizedMessage( + "Rate limited blob store transfer, context [{}], 
for duration [{} ms] for configured rate [{} MBps]", + context, + TimeValue.timeValueNanos(time).millis(), + rateLimiter.getMBPerSec() + ) + ); + } + + private static InputStream maybeRateLimit( + InputStream stream, + Supplier rateLimiterSupplier, + CounterMetric metric, + BlobStoreTransferContext context + ) { + return new RateLimitingInputStream(stream, rateLimiterSupplier, (t) -> { + mayBeLogRateLimits(context, rateLimiterSupplier.get(), t); + metric.inc(t); + }); + } + + private static OffsetRangeInputStream maybeRateLimitRemoteTransfers( + OffsetRangeInputStream offsetRangeInputStream, + Supplier rateLimiterSupplier, + CounterMetric metric, + BlobStoreTransferContext context + ) { + return new RateLimitingOffsetRangeInputStream(offsetRangeInputStream, rateLimiterSupplier, (t) -> { + mayBeLogRateLimits(context, rateLimiterSupplier.get(), t); + metric.inc(t); + }); } public InputStream maybeRateLimitRestores(InputStream stream) { return maybeRateLimit( - maybeRateLimit(stream, () -> restoreRateLimiter, restoreRateLimitingTimeInNanos), + maybeRateLimit(stream, () -> restoreRateLimiter, restoreRateLimitingTimeInNanos, BlobStoreTransferContext.SNAPSHOT_RESTORE), recoverySettings::rateLimiter, - restoreRateLimitingTimeInNanos + restoreRateLimitingTimeInNanos, + BlobStoreTransferContext.SNAPSHOT_RESTORE + ); + } + + public OffsetRangeInputStream maybeRateLimitRemoteUploadTransfers(OffsetRangeInputStream offsetRangeInputStream) { + return maybeRateLimitRemoteTransfers( + offsetRangeInputStream, + () -> remoteUploadRateLimiter, + remoteUploadRateLimitingTimeInNanos, + BlobStoreTransferContext.REMOTE_UPLOAD + ); + } + + public InputStream maybeRateLimitRemoteDownloadTransfers(InputStream inputStream) { + return maybeRateLimit( + maybeRateLimit( + inputStream, + () -> remoteDownloadRateLimiter, + remoteDownloadRateLimitingTimeInNanos, + BlobStoreTransferContext.REMOTE_DOWNLOAD + ), + recoverySettings::rateLimiter, + remoteDownloadRateLimitingTimeInNanos, + BlobStoreTransferContext.REMOTE_DOWNLOAD ); } public InputStream maybeRateLimitSnapshots(InputStream stream) { - return maybeRateLimit(stream, () -> snapshotRateLimiter, snapshotRateLimitingTimeInNanos); + return maybeRateLimit(stream, () -> snapshotRateLimiter, snapshotRateLimitingTimeInNanos, BlobStoreTransferContext.SNAPSHOT); } @Override @@ -3379,4 +3456,22 @@ private static final class ShardSnapshotMetaDeleteResult { this.blobsToDelete = blobsToDelete; } } + + enum BlobStoreTransferContext { + REMOTE_UPLOAD("remote_upload"), + REMOTE_DOWNLOAD("remote_download"), + SNAPSHOT("snapshot"), + SNAPSHOT_RESTORE("snapshot_restore"); + + private final String name; + + BlobStoreTransferContext(String name) { + this.name = name; + } + + @Override + public String toString() { + return name; + } + } } diff --git a/server/src/test/java/org/opensearch/common/blobstore/transfer/stream/RateLimitingOffsetRangeInputStreamTests.java b/server/src/test/java/org/opensearch/common/blobstore/transfer/stream/RateLimitingOffsetRangeInputStreamTests.java new file mode 100644 index 0000000000000..fc2eba4c35e2a --- /dev/null +++ b/server/src/test/java/org/opensearch/common/blobstore/transfer/stream/RateLimitingOffsetRangeInputStreamTests.java @@ -0,0 +1,46 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common.blobstore.transfer.stream; + +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.IOContext; +import org.apache.lucene.store.NIOFSDirectory; +import org.apache.lucene.store.RateLimiter; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; + +public class RateLimitingOffsetRangeInputStreamTests extends ResettableCheckedInputStreamBaseTest { + + private Directory directory; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + directory = new NIOFSDirectory(testFile.getParent()); + } + + @Override + protected OffsetRangeInputStream getOffsetRangeInputStream(long size, long position) throws IOException { + return new RateLimitingOffsetRangeInputStream( + new OffsetRangeIndexInputStream(directory.openInput(testFile.getFileName().toString(), IOContext.DEFAULT), size, position), + () -> new RateLimiter.SimpleRateLimiter(randomIntBetween(10, 20)), + (t) -> {} + ); + } + + @Override + @After + public void tearDown() throws Exception { + directory.close(); + super.tearDown(); + } +} diff --git a/server/src/test/java/org/opensearch/index/store/RemoteDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteDirectoryTests.java index b220b0891f11d..7655690685889 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteDirectoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteDirectoryTests.java @@ -8,12 +8,17 @@ package org.opensearch.index.store; +import org.apache.lucene.codecs.CodecUtil; +import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.IndexOutput; +import org.apache.lucene.tests.util.LuceneTestCase; import org.opensearch.action.LatchedActionListener; import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobMetadata; +import org.opensearch.common.blobstore.VerifyingMultiStreamBlobContainer; +import org.opensearch.common.blobstore.stream.write.WriteContext; import org.opensearch.common.blobstore.support.PlainBlobMetadata; import org.opensearch.core.action.ActionListener; import org.opensearch.test.OpenSearchTestCase; @@ -28,9 +33,14 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; import java.util.stream.Stream; +import org.mockito.Mockito; + import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.doAnswer; @@ -58,6 +68,85 @@ public void testListAllEmpty() throws IOException { assertArrayEquals(expectedFileName, actualFileNames); } + public void testCopyFrom() throws IOException, InterruptedException { + AtomicReference postUploadInvoked = new AtomicReference<>(false); + String filename = "_100.si"; + VerifyingMultiStreamBlobContainer blobContainer = mock(VerifyingMultiStreamBlobContainer.class); + Mockito.doAnswer(invocation -> { + ActionListener completionListener = invocation.getArgument(1); + completionListener.onResponse(null); + return null; + }).when(blobContainer).asyncBlobUpload(any(WriteContext.class), any()); + + Directory storeDirectory = LuceneTestCase.newDirectory(); + IndexOutput indexOutput = storeDirectory.createOutput(filename, IOContext.DEFAULT); + indexOutput.writeString("Hello World!"); + 
CodecUtil.writeFooter(indexOutput); + indexOutput.close(); + storeDirectory.sync(List.of(filename)); + + CountDownLatch countDownLatch = new CountDownLatch(1); + RemoteDirectory remoteDirectory = new RemoteDirectory(blobContainer); + remoteDirectory.copyFrom( + storeDirectory, + filename, + filename, + IOContext.READ, + () -> postUploadInvoked.set(true), + new ActionListener<>() { + @Override + public void onResponse(Void t) { + countDownLatch.countDown(); + } + + @Override + public void onFailure(Exception e) { + fail("Listener responded with exception" + e); + } + } + ); + assertTrue(countDownLatch.await(10, TimeUnit.SECONDS)); + assertTrue(postUploadInvoked.get()); + storeDirectory.close(); + } + + public void testCopyFromWithException() throws IOException, InterruptedException { + AtomicReference postUploadInvoked = new AtomicReference<>(false); + String filename = "_100.si"; + VerifyingMultiStreamBlobContainer blobContainer = mock(VerifyingMultiStreamBlobContainer.class); + Mockito.doAnswer(invocation -> { + ActionListener completionListener = invocation.getArgument(1); + completionListener.onResponse(null); + return null; + }).when(blobContainer).asyncBlobUpload(any(WriteContext.class), any()); + + Directory storeDirectory = LuceneTestCase.newDirectory(); + + CountDownLatch countDownLatch = new CountDownLatch(1); + RemoteDirectory remoteDirectory = new RemoteDirectory(blobContainer); + remoteDirectory.copyFrom( + storeDirectory, + filename, + filename, + IOContext.READ, + () -> postUploadInvoked.set(true), + new ActionListener<>() { + @Override + public void onResponse(Void t) { + fail("Listener responded with success"); + } + + @Override + public void onFailure(Exception e) { + countDownLatch.countDown(); + } + } + ); + assertTrue(countDownLatch.await(10, TimeUnit.SECONDS)); + assertFalse(postUploadInvoked.get()); + storeDirectory.close(); + } + public void testListAll() throws IOException { Map fileNames = Stream.of("abc", "xyz", "pqr", "lmn", "jkl") .collect(Collectors.toMap(filename -> filename, filename -> new PlainBlobMetadata(filename, 100))); diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java index 91154e5b77641..44dfb44eb9a15 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java @@ -517,6 +517,15 @@ public void onFailure(Exception e) {} public void testCopyFilesFromMultipartIOException() throws Exception { String filename = "_100.si"; + VerifyingMultiStreamBlobContainer blobContainer = mock(VerifyingMultiStreamBlobContainer.class); + remoteDataDirectory = new RemoteDirectory(blobContainer); + remoteSegmentStoreDirectory = new RemoteSegmentStoreDirectory( + remoteDataDirectory, + remoteMetadataDirectory, + mdLockManager, + threadPool + ); + populateMetadata(); remoteSegmentStoreDirectory.init(); @@ -528,9 +537,6 @@ public void testCopyFilesFromMultipartIOException() throws Exception { storeDirectory.sync(List.of(filename)); assertFalse(remoteSegmentStoreDirectory.getSegmentsUploadedToRemoteStore().containsKey(filename)); - - VerifyingMultiStreamBlobContainer blobContainer = mock(VerifyingMultiStreamBlobContainer.class); - when(remoteDataDirectory.getBlobContainer()).thenReturn(blobContainer); Mockito.doAnswer(invocation -> { ActionListener completionListener = invocation.getArgument(1); 
completionListener.onFailure(new Exception("Test exception")); diff --git a/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java b/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java index c574c6d516fd3..62bc4016d892d 100644 --- a/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java +++ b/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java @@ -281,6 +281,16 @@ public long getRestoreThrottleTimeInNanos() { return 0; } + @Override + public long getRemoteUploadThrottleTimeInNanos() { + return 0; + } + + @Override + public long getRemoteDownloadThrottleTimeInNanos() { + return 0; + } + @Override public String startVerification() { return null; diff --git a/test/framework/src/main/java/org/opensearch/index/shard/RestoreOnlyRepository.java b/test/framework/src/main/java/org/opensearch/index/shard/RestoreOnlyRepository.java index 38520e9292206..fbee13ab3b551 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/RestoreOnlyRepository.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/RestoreOnlyRepository.java @@ -150,6 +150,16 @@ public long getRestoreThrottleTimeInNanos() { return 0; } + @Override + public long getRemoteUploadThrottleTimeInNanos() { + return 0; + } + + @Override + public long getRemoteDownloadThrottleTimeInNanos() { + return 0; + } + @Override public String startVerification() { return null; From 3a8bbe9095ec028e74ec9a2700bf417ca5ed2fb5 Mon Sep 17 00:00:00 2001 From: Ketan Verma <9292653+ketanv3@users.noreply.github.com> Date: Fri, 25 Aug 2023 21:50:53 +0530 Subject: [PATCH 28/30] Performance improvements for BytesRefHash (#8788) * Performance improvements for BytesRefHash Signed-off-by: Ketan Verma * Replace BytesRefHash and clean up alternative implementations Signed-off-by: Ketan Verma * Added t1ha1 to replace xxh3 hash function Signed-off-by: Ketan Verma * Update t1ha1 to use unsignedMultiplyHigh on JDK 18 and above Signed-off-by: Ketan Verma * Add link to the reference implementation for t1ha1 Signed-off-by: Ketan Verma * Annotate t1ha1 with @opensearch.internal Signed-off-by: Ketan Verma * Run spotless Signed-off-by: Ketan Verma * Add pre-computed hashes to speed up reinserts Signed-off-by: Ketan Verma * Refactor HashFunctionTestCase Signed-off-by: Ketan Verma --------- Signed-off-by: Ketan Verma --- CHANGELOG.md | 1 + .../common/hash/HashFunctionBenchmark.java | 172 +++++++++ .../common/util/BytesRefHashBenchmark.java | 249 +++++++++++++ .../java/org/opensearch/common/Numbers.java | 8 + .../org/opensearch/common/hash/T1ha1.java | 277 ++++++++++++++ .../opensearch/common/hash/package-info.java | 12 + .../opensearch/common/hash/T1Ha1Tests.java | 312 ++++++++++++++++ .../opensearch/common/util/BytesRefHash.java | 346 ++++++++++++------ .../common/util/ReorganizingLongHash.java | 14 +- .../bucket/terms/BytesKeyedBucketOrds.java | 4 +- .../bucket/terms/SignificanceLookup.java | 2 +- .../terms/StringRareTermsAggregator.java | 2 +- .../org/opensearch/common/NumbersTests.java | 21 ++ .../common/util/BytesRefHashTests.java | 61 +-- .../common/hash/AvalancheStats.java | 63 ++++ .../common/hash/HashFunctionTestCase.java | 79 ++++ .../hash/HashFunctionTestCaseTests.java | 68 ++++ 17 files changed, 1547 insertions(+), 144 deletions(-) create mode 100644 benchmarks/src/main/java/org/opensearch/common/hash/HashFunctionBenchmark.java create mode 100644 benchmarks/src/main/java/org/opensearch/common/util/BytesRefHashBenchmark.java create mode 100644 
libs/common/src/main/java/org/opensearch/common/hash/T1ha1.java create mode 100644 libs/common/src/main/java/org/opensearch/common/hash/package-info.java create mode 100644 libs/common/src/test/java/org/opensearch/common/hash/T1Ha1Tests.java create mode 100644 test/framework/src/main/java/org/opensearch/common/hash/AvalancheStats.java create mode 100644 test/framework/src/main/java/org/opensearch/common/hash/HashFunctionTestCase.java create mode 100644 test/framework/src/test/java/org/opensearch/common/hash/HashFunctionTestCaseTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 1d906fdf3a416..555d51cb1d066 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -137,6 +137,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Replace the deprecated IndexReader APIs with new storedFields() & termVectors() ([#7792](https://github.com/opensearch-project/OpenSearch/pull/7792)) - [Remote Store] Add support to restore only unassigned shards of an index ([#8792](https://github.com/opensearch-project/OpenSearch/pull/8792)) - Add safeguard limits for file cache during node level allocation ([#8208](https://github.com/opensearch-project/OpenSearch/pull/8208)) +- Performance improvements for BytesRefHash ([#8788](https://github.com/opensearch-project/OpenSearch/pull/8788)) - Add support for aggregation profiler with concurrent aggregation ([#8801](https://github.com/opensearch-project/OpenSearch/pull/8801)) - [Remove] Deprecated Fractional ByteSizeValue support #9005 ([#9005](https://github.com/opensearch-project/OpenSearch/pull/9005)) - Add support for aggregation profiler with concurrent aggregation ([#8801](https://github.com/opensearch-project/OpenSearch/pull/8801)) diff --git a/benchmarks/src/main/java/org/opensearch/common/hash/HashFunctionBenchmark.java b/benchmarks/src/main/java/org/opensearch/common/hash/HashFunctionBenchmark.java new file mode 100644 index 0000000000000..8842337a468a1 --- /dev/null +++ b/benchmarks/src/main/java/org/opensearch/common/hash/HashFunctionBenchmark.java @@ -0,0 +1,172 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common.hash; + +import org.apache.lucene.util.StringHelper; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; +import org.openjdk.jmh.infra.Blackhole; + +import java.util.Random; + +@Fork(value = 3) +@Warmup(iterations = 1, time = 1) +@Measurement(iterations = 3, time = 3) +@BenchmarkMode(Mode.Throughput) +public class HashFunctionBenchmark { + + @Benchmark + public void hash(Blackhole bh, Options opts) { + bh.consume(opts.type.hash(opts.data)); + } + + @State(Scope.Benchmark) + public static class Options { + @Param({ "MURMUR3", "T1HA1" }) + public Type type; + + @Param({ + "1", + "2", + "3", + "4", + "5", + "6", + "7", + "8", + "9", + "10", + "12", + "14", + "16", + "18", + "21", + "24", + "28", + "32", + "36", + "41", + "47", + "54", + "62", + "71", + "81", + "90", + "100", + "112", + "125", + "139", + "156", + "174", + "194", + "220", + "245", + "272", + "302", + "339", + "384", + "431", + "488", + "547", + "608", + "675", + "763", + "863", + "967", + "1084", + "1225", + "1372", + "1537", + "1737", + "1929", + "2142", + "2378", + "2664", + "3011", + "3343", + "3778", + "4232", + "4783", + "5310", + "5895", + "6662", + "7529", + "8508", + "9444", + "10483", + "11741", + "13150", + "14597", + "16495", + "18475", + "20877", + "23383", + "25956", + "29071", + "32560", + "36142", + "40841", + "46151", + "52151", + "57888", + "65414", + "72610", + "82050", + "91076", + "102006", + "114247", + "127957", + "143312", + "159077", + "176576", + "199531", + "223475", + "250292", + "277825", + "313943", + "351617", + "393812" }) + public Integer length; + public byte[] data; + + @Setup + public void setup() { + data = new byte[length]; + new Random(0).nextBytes(data); + } + } + + public enum Type { + MURMUR3((data, offset, length) -> StringHelper.murmurhash3_x86_32(data, offset, length, 0)), + T1HA1((data, offset, length) -> T1ha1.hash(data, offset, length, 0)); + + private final Hasher hasher; + + Type(Hasher hasher) { + this.hasher = hasher; + } + + public long hash(byte[] data) { + return hasher.hash(data, 0, data.length); + } + } + + @FunctionalInterface + interface Hasher { + long hash(byte[] data, int offset, int length); + } +} diff --git a/benchmarks/src/main/java/org/opensearch/common/util/BytesRefHashBenchmark.java b/benchmarks/src/main/java/org/opensearch/common/util/BytesRefHashBenchmark.java new file mode 100644 index 0000000000000..fef12b6d9f84a --- /dev/null +++ b/benchmarks/src/main/java/org/opensearch/common/util/BytesRefHashBenchmark.java @@ -0,0 +1,249 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common.util; + +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.StringHelper; +import org.opensearch.common.hash.T1ha1; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lease.Releasables; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; +import org.openjdk.jmh.infra.Blackhole; + +import java.util.HashSet; +import java.util.Random; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.function.Supplier; +import java.util.stream.Stream; + +@Fork(value = 3) +@Warmup(iterations = 1, time = 2) +@Measurement(iterations = 3, time = 5) +@BenchmarkMode(Mode.AverageTime) +@OutputTimeUnit(TimeUnit.MILLISECONDS) +public class BytesRefHashBenchmark { + private static final int NUM_TABLES = 20; // run across many tables so that caches aren't effective + private static final int NUM_HITS = 1_000_000; // num hits per table + + @Benchmark + public void add(Blackhole bh, Options opts) { + HashTable[] tables = Stream.generate(opts.type::create).limit(NUM_TABLES).toArray(HashTable[]::new); + + for (int hit = 0; hit < NUM_HITS; hit++) { + BytesRef key = opts.keys[hit % opts.keys.length]; + for (HashTable table : tables) { + bh.consume(table.add(key)); + } + } + + Releasables.close(tables); + } + + @State(Scope.Benchmark) + public static class Options { + @Param({ "MURMUR3", "T1HA1" }) + public Type type; + + @Param({ + "1", + "2", + "3", + "4", + "5", + "6", + "7", + "8", + "10", + "12", + "14", + "16", + "19", + "22", + "25", + "29", + "33", + "38", + "43", + "50", + "57", + "65", + "75", + "86", + "97", + "109", + "124", + "141", + "161", + "182", + "204", + "229", + "262", + "297", + "336", + "380", + "430", + "482", + "550", + "610", + "704", + "801", + "914", + "1042", + "1178", + "1343", + "1532", + "1716", + "1940", + "2173", + "2456", + "2751", + "3082", + "3514", + "4006", + "4487", + "5026", + "5730", + "6418", + "7317", + "8196", + "9180", + "10374", + "11723", + "13247", + "14837", + "16915", + "19114", + "21599", + "24623", + "28071", + "32001", + "36482", + "41590", + "46581", + "52637", + "58954", + "67208", + "76618", + "86579", + "97835", + "109576", + "122726", + "138681", + "156710", + "175516", + "198334", + "222135", + "248792", + "281135", + "320494", + "365364", + "409208", + "466498", + "527143", + "595672", + "667153", + "753883", + "851888", + "971153" }) + public Integer size; + + @Param({ "5", "28", "59", "105" }) + public Integer length; + + private BytesRef[] keys; + + @Setup + public void setup() { + assert size <= Math.pow(26, length) : "key length too small to generate the required number of keys"; + // Seeding with size will help produce deterministic results for the same size, and avoid similar + // looking clusters for different sizes, in case one hash function got unlucky. 
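+ // The do-while below rejection-samples random lowercase-ASCII strings (code points 97-122) until 'size' distinct keys have been collected.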
+ Random random = new Random(size); + Set seen = new HashSet<>(); + keys = new BytesRef[size]; + for (int i = 0; i < size; i++) { + BytesRef key; + do { + key = new BytesRef( + random.ints(97, 123) + .limit(length) + .collect(StringBuilder::new, StringBuilder::appendCodePoint, StringBuilder::append) + .toString() + ); + } while (seen.contains(key)); + keys[i] = key; + seen.add(key); + } + } + } + + public enum Type { + MURMUR3(() -> new HashTable() { + private final BytesRefHash table = new BytesRefHash(1, 0.6f, key -> { + // Repeating the lower bits into upper bits to make the fingerprint work. + // Alternatively, use a 64-bit murmur3 hash, but that won't represent the baseline. + long h = StringHelper.murmurhash3_x86_32(key.bytes, key.offset, key.length, 0) & 0xFFFFFFFFL; + return h | (h << 32); + }, BigArrays.NON_RECYCLING_INSTANCE); + + @Override + public long add(BytesRef key) { + return table.add(key); + } + + @Override + public void close() { + table.close(); + } + }), + + T1HA1(() -> new HashTable() { + private final BytesRefHash table = new BytesRefHash( + 1, + 0.6f, + key -> T1ha1.hash(key.bytes, key.offset, key.length, 0), + BigArrays.NON_RECYCLING_INSTANCE + ); + + @Override + public long add(BytesRef key) { + return table.add(key); + } + + @Override + public void close() { + table.close(); + } + }); + + private final Supplier supplier; + + Type(Supplier supplier) { + this.supplier = supplier; + } + + public HashTable create() { + return supplier.get(); + } + } + + interface HashTable extends Releasable { + long add(BytesRef key); + } +} diff --git a/libs/common/src/main/java/org/opensearch/common/Numbers.java b/libs/common/src/main/java/org/opensearch/common/Numbers.java index 084e52a41f8b1..d5a364a4a934e 100644 --- a/libs/common/src/main/java/org/opensearch/common/Numbers.java +++ b/libs/common/src/main/java/org/opensearch/common/Numbers.java @@ -260,4 +260,12 @@ public static double unsignedLongToDouble(long value) { // want to replace that with 1 in the shifted value for correct rounding. return (double) ((value >>> 1) | (value & 1)) * 2.0; } + + /** + * Return the strictly greater next power of two for the given value. + * For zero and negative numbers, it returns 1. + */ + public static long nextPowerOfTwo(long value) { + return 1L << (Long.SIZE - Long.numberOfLeadingZeros(value)); + } } diff --git a/libs/common/src/main/java/org/opensearch/common/hash/T1ha1.java b/libs/common/src/main/java/org/opensearch/common/hash/T1ha1.java new file mode 100644 index 0000000000000..07b2306eda4e5 --- /dev/null +++ b/libs/common/src/main/java/org/opensearch/common/hash/T1ha1.java @@ -0,0 +1,277 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.hash; + +import org.opensearch.common.annotation.InternalApi; + +import java.lang.invoke.MethodHandle; +import java.lang.invoke.MethodHandles; +import java.lang.invoke.MethodType; +import java.lang.invoke.VarHandle; +import java.nio.ByteOrder; + +import static java.lang.Long.rotateRight; + +/** + * t1ha: Fast Positive Hash + * + *
+ * Implements t1ha1; + * a fast portable hash function with reasonable quality for checksums, hash tables, and thin fingerprinting. + * + *
+ * To overcome language and performance limitations, this implementation differs slightly from the + * reference implementation in C++, + * so the returned values may vary before JDK 18. + * + *
+ * Intended for little-endian systems but returns the same result on big-endian, albeit marginally slower. + * + * @opensearch.internal + */ +@InternalApi +public final class T1ha1 { + private static final long SEED = System.nanoTime(); + private static final Mux64 MUX_64_IMPL = fastestMux64Impl(); + + private static final VarHandle LONG_HANDLE = MethodHandles.byteArrayViewVarHandle(long[].class, ByteOrder.LITTLE_ENDIAN); + private static final VarHandle INT_HANDLE = MethodHandles.byteArrayViewVarHandle(int[].class, ByteOrder.LITTLE_ENDIAN); + private static final VarHandle SHORT_HANDLE = MethodHandles.byteArrayViewVarHandle(short[].class, ByteOrder.LITTLE_ENDIAN); + + // "Magic" primes: + private static final long p0 = 0xEC99BF0D8372CAABL; + private static final long p1 = 0x82434FE90EDCEF39L; + private static final long p2 = 0xD4F06DB99D67BE4BL; + private static final long p3 = 0xBD9CACC22C6E9571L; + private static final long p4 = 0x9C06FAF4D023E3ABL; + private static final long p5 = 0xC060724A8424F345L; + private static final long p6 = 0xCB5AF53AE3AAAC31L; + + // Rotations: + private static final int s0 = 41; + private static final int s1 = 17; + private static final int s2 = 31; + + /** + * No public constructor. + */ + private T1ha1() {} + + /** + * Returns the hash code for the specified range of the given {@code byte} array. + * @param input the input byte array + * @param offset the starting offset + * @param length the length of the range + * @return hash code + */ + public static long hash(byte[] input, int offset, int length) { + return hash(input, offset, length, SEED); + } + + /** + * Returns the hash code for the specified range of the given {@code byte} array. + * @param input the input byte array + * @param offset the starting offset + * @param length the length of the range + * @param seed customized seed + * @return hash code + */ + public static long hash(byte[] input, int offset, int length, long seed) { + long a = seed; + long b = length; + + if (length > 32) { + long c = rotateRight(length, s1) + seed; + long d = length ^ rotateRight(seed, s1); + + do { + long w0 = fetch64(input, offset); + long w1 = fetch64(input, offset + 8); + long w2 = fetch64(input, offset + 16); + long w3 = fetch64(input, offset + 24); + + long d02 = w0 ^ rotateRight(w2 + d, s1); + long c13 = w1 ^ rotateRight(w3 + c, s1); + c += a ^ rotateRight(w0, s0); + d -= b ^ rotateRight(w1, s2); + a ^= p1 * (d02 + w3); + b ^= p0 * (c13 + w2); + + offset += 32; + length -= 32; + } while (length >= 32); + + a ^= p6 * (rotateRight(c, s1) + d); + b ^= p5 * (rotateRight(d, s1) + c); + } + + return h32(input, offset, length, a, b); + } + + /** + * Computes the hash of up to 32 bytes. + * Constants in the switch expression are dense; JVM will use them as indices into a table of + * instruction pointers (tableswitch instruction), making lookups really fast. 
+ */ + @SuppressWarnings("fallthrough") + private static long h32(byte[] input, int offset, int length, long a, long b) { + switch (length) { + default: + b += mux64(fetch64(input, offset), p4); + offset += 8; + length -= 8; + case 24: + case 23: + case 22: + case 21: + case 20: + case 19: + case 18: + case 17: + a += mux64(fetch64(input, offset), p3); + offset += 8; + length -= 8; + case 16: + case 15: + case 14: + case 13: + case 12: + case 11: + case 10: + case 9: + b += mux64(fetch64(input, offset), p2); + offset += 8; + length -= 8; + case 8: + case 7: + case 6: + case 5: + case 4: + case 3: + case 2: + case 1: + a += mux64(tail64(input, offset, length), p1); + case 0: + // Final weak avalanche + return mux64(rotateRight(a + b, s1), p4) + mix64(a ^ b, p0); + } + } + + /** + * XOR the high and low parts of the full 128-bit product. + */ + private static long mux64(long a, long b) { + return MUX_64_IMPL.mux64(a, b); + } + + /** + * XOR-MUL-XOR bit-mixer. + */ + private static long mix64(long a, long b) { + a *= b; + return a ^ rotateRight(a, s0); + } + + /** + * Reads "length" bytes starting at "offset" in little-endian order; returned as long. + * It is assumed that the length is between 1 and 8 (inclusive); but no defensive checks are made as such. + */ + private static long tail64(byte[] input, int offset, int length) { + switch (length) { + case 1: + return fetch8(input, offset); + case 2: + return fetch16(input, offset); + case 3: + return fetch16(input, offset) | (fetch8(input, offset + 2) << 16); + case 4: + return fetch32(input, offset); + case 5: + return fetch32(input, offset) | (fetch8(input, offset + 4) << 32); + case 6: + return fetch32(input, offset) | (fetch16(input, offset + 4) << 32); + case 7: + // This is equivalent to: + // return fetch32(input, offset) | (fetch16(input, offset + 4) << 32) | (fetch8(input, offset + 6) << 48); + // But reading two ints overlapping by one byte is faster due to lesser instructions. + return fetch32(input, offset) | (fetch32(input, offset + 3) << 24); + default: + return fetch64(input, offset); + } + } + + /** + * Reads a 64-bit long. + */ + private static long fetch64(byte[] input, int offset) { + return (long) LONG_HANDLE.get(input, offset); + } + + /** + * Reads a 32-bit unsigned integer, returned as long. + */ + private static long fetch32(byte[] input, int offset) { + return (int) INT_HANDLE.get(input, offset) & 0xFFFFFFFFL; + } + + /** + * Reads a 16-bit unsigned short, returned as long. + */ + private static long fetch16(byte[] input, int offset) { + return (short) SHORT_HANDLE.get(input, offset) & 0xFFFFL; + } + + /** + * Reads an 8-bit unsigned byte, returned as long. + */ + private static long fetch8(byte[] input, int offset) { + return input[offset] & 0xFFL; + } + + /** + * The implementation of mux64. + */ + @FunctionalInterface + private interface Mux64 { + long mux64(long a, long b); + } + + /** + * Provides the fastest available implementation of mux64 on this platform. + * + *
<p>
+ * Ideally, the following should be returned to match the reference implementation: + * {@code Math.unsignedMultiplyHigh(a, b) ^ (a * b)} + * + *
<p>
+ * Since unsignedMultiplyHigh isn't available before JDK 18, and calculating it without intrinsics is quite slow, + * the multiplyHigh method is used instead. The slight loss in quality is imperceptible for our use case: a hash table. + * {@code Math.multiplyHigh(a, b) ^ (a * b)} + *
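<p>
+ * For reference, the unsigned high half can be recovered from the signed one via the identity
+ * used by JDK 18's default implementation (shown here only for illustration):
+ * <pre>{@code
+ * long unsignedHigh = Math.multiplyHigh(a, b) + ((a >> 63) & b) + ((b >> 63) & a);
+ * }</pre>
+ *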
<p>
+ * This indirection can be removed once we stop supporting older JDKs. + */ + private static Mux64 fastestMux64Impl() { + try { + final MethodHandle unsignedMultiplyHigh = MethodHandles.publicLookup() + .findStatic(Math.class, "unsignedMultiplyHigh", MethodType.methodType(long.class, long.class, long.class)); + return (a, b) -> { + try { + return (long) unsignedMultiplyHigh.invokeExact(a, b) ^ (a * b); + } catch (Throwable e) { + throw new RuntimeException(e); + } + }; + } catch (NoSuchMethodException e) { + return (a, b) -> Math.multiplyHigh(a, b) ^ (a * b); + } catch (IllegalAccessException e) { + throw new RuntimeException(e); + } + } +} diff --git a/libs/common/src/main/java/org/opensearch/common/hash/package-info.java b/libs/common/src/main/java/org/opensearch/common/hash/package-info.java new file mode 100644 index 0000000000000..bd393b8b921ed --- /dev/null +++ b/libs/common/src/main/java/org/opensearch/common/hash/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Common hashing utilities. + */ +package org.opensearch.common.hash; diff --git a/libs/common/src/test/java/org/opensearch/common/hash/T1Ha1Tests.java b/libs/common/src/test/java/org/opensearch/common/hash/T1Ha1Tests.java new file mode 100644 index 0000000000000..e348fbf759bdd --- /dev/null +++ b/libs/common/src/test/java/org/opensearch/common/hash/T1Ha1Tests.java @@ -0,0 +1,312 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.hash; + +import java.lang.invoke.MethodHandles; +import java.lang.invoke.MethodType; +import java.lang.invoke.VarHandle; +import java.nio.ByteOrder; + +public class T1Ha1Tests extends HashFunctionTestCase { + private static final VarHandle LONG_HANDLE = MethodHandles.byteArrayViewVarHandle(long[].class, ByteOrder.LITTLE_ENDIAN); + private final byte[] scratch = new byte[8]; + + /** + * Inspired from the tests defined in the reference implementation: + * t1ha_selfcheck.c + */ + public void testSelfCheck() { + byte[] testPattern = { + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + (byte) 0xFF, + 0x7F, + 0x3F, + 0x1F, + 0xF, + 8, + 16, + 32, + 64, + (byte) 0x80, + (byte) 0xFE, + (byte) 0xFC, + (byte) 0xF8, + (byte) 0xF0, + (byte) 0xE0, + (byte) 0xC0, + (byte) 0xFD, + (byte) 0xFB, + (byte) 0xF7, + (byte) 0xEF, + (byte) 0xDF, + (byte) 0xBF, + 0x55, + (byte) 0xAA, + 11, + 17, + 19, + 23, + 29, + 37, + 42, + 43, + 'a', + 'b', + 'c', + 'd', + 'e', + 'f', + 'g', + 'h', + 'i', + 'j', + 'k', + 'l', + 'm', + 'n', + 'o', + 'p', + 'q', + 'r', + 's', + 't', + 'u', + 'v', + 'w', + 'x' }; + + // Reference hashes when using {@link Math::unsignedMultiplyHigh} in the mux64 step. 
+ // These values match the ones defined in the reference implementation: + // https://github.com/erthink/t1ha/blob/master/src/t1ha1_selfcheck.c#L51-L72 + long[] referenceUnsignedMultiplyHigh = { + 0L, + 0x6A580668D6048674L, + 0xA2FE904AFF0D0879L, + 0xE3AB9C06FAF4D023L, + 0x6AF1C60874C95442L, + 0xB3557E561A6C5D82L, + 0x0AE73C696F3D37C0L, + 0x5EF25F7062324941L, + 0x9B784F3B4CE6AF33L, + 0x6993BB206A74F070L, + 0xF1E95DF109076C4CL, + 0x4E1EB70C58E48540L, + 0x5FDD7649D8EC44E4L, + 0x559122C706343421L, + 0x380133D58665E93DL, + 0x9CE74296C8C55AE4L, + 0x3556F9A5757AB6D0L, + 0xF62751F7F25C469EL, + 0x851EEC67F6516D94L, + 0xED463EE3848A8695L, + 0xDC8791FEFF8ED3ACL, + 0x2569C744E1A282CFL, + 0xF90EB7C1D70A80B9L, + 0x68DFA6A1B8050A4CL, + 0x94CCA5E8210D2134L, + 0xF5CC0BEABC259F52L, + 0x40DBC1F51618FDA7L, + 0x0807945BF0FB52C6L, + 0xE5EF7E09DE70848DL, + 0x63E1DF35FEBE994AL, + 0x2025E73769720D5AL, + 0xAD6120B2B8A152E1L, + 0x2A71D9F13959F2B7L, + 0x8A20849A27C32548L, + 0x0BCBC9FE3B57884EL, + 0x0E028D255667AEADL, + 0xBE66DAD3043AB694L, + 0xB00E4C1238F9E2D4L, + 0x5C54BDE5AE280E82L, + 0x0E22B86754BC3BC4L, + 0x016707EBF858B84DL, + 0x990015FBC9E095EEL, + 0x8B9AF0A3E71F042FL, + 0x6AA56E88BD380564L, + 0xAACE57113E681A0FL, + 0x19F81514AFA9A22DL, + 0x80DABA3D62BEAC79L, + 0x715210412CABBF46L, + 0xD8FA0B9E9D6AA93FL, + 0x6C2FC5A4109FD3A2L, + 0x5B3E60EEB51DDCD8L, + 0x0A7C717017756FE7L, + 0xA73773805CA31934L, + 0x4DBD6BB7A31E85FDL, + 0x24F619D3D5BC2DB4L, + 0x3E4AF35A1678D636L, + 0x84A1A8DF8D609239L, + 0x359C862CD3BE4FCDL, + 0xCF3A39F5C27DC125L, + 0xC0FF62F8FD5F4C77L, + 0x5E9F2493DDAA166CL, + 0x17424152BE1CA266L, + 0xA78AFA5AB4BBE0CDL, + 0x7BFB2E2CEF118346L, + 0x647C3E0FF3E3D241L, + 0x0352E4055C13242EL, + 0x6F42FC70EB660E38L, + 0x0BEBAD4FABF523BAL, + 0x9269F4214414D61DL, + 0x1CA8760277E6006CL, + 0x7BAD25A859D87B5DL, + 0xAD645ADCF7414F1DL, + 0xB07F517E88D7AFB3L, + 0xB321C06FB5FFAB5CL, + 0xD50F162A1EFDD844L, + 0x1DFD3D1924FBE319L, + 0xDFAEAB2F09EF7E78L, + 0xA7603B5AF07A0B1EL, + 0x41CD044C0E5A4EE3L, + 0xF64D2F86E813BF33L, + 0xFF9FDB99305EB06AL }; + + // Reference hashes when using {@link Math::multiplyHigh} in the mux64 step. 
+ long[] referenceMultiplyHigh = { + 0L, + 0xCE510B7405E0A2CAL, + 0xC0A2DA74A8271FCBL, + 0x1C549C06FAF4D023L, + 0x084CDA0ED41CD2D4L, + 0xD05BA7AA9FEECE5BL, + 0x7D6128AB2CCC4EB1L, + 0x62332FA6EC1B50AAL, + 0x1B66C81767870EF2L, + 0xEC6B92A37AED73B8L, + 0x1712987232EF4ED3L, + 0xAA503A04AE2450B5L, + 0x15D25DE445730A6CL, + 0xAB87E38AA8D21746L, + 0x18CAE735BBF62D15L, + 0x0D56DFF9914CA656L, + 0xCB4F5859A9AE5B52L, + 0xEE97003F7B1283E1L, + 0x50CFB2AF0F54BA6DL, + 0x570B4D6AE4C67814L, + 0x1ED59274A97497EBL, + 0x8608D03D165C59BFL, + 0x6CBE0E537BE04C02L, + 0xD4C8FCFD4179A874L, + 0xFB4E677D876118A1L, + 0x6B1A96F1B4765D79L, + 0x1075B9B89BDFE5F8L, + 0x02771D08F2891CB1L, + 0x4BB8E16FF410F19EL, + 0x3EB7849C0DFAF566L, + 0x173B09359DE422CFL, + 0xFE212C6DB7474306L, + 0xA74E7C2D632664EFL, + 0x56ECDED6546F0914L, + 0x08DEF866EF20A94BL, + 0x7D0BAC64606521F1L, + 0xCA6BA9817A357FA9L, + 0x0873B834A6E2AAE4L, + 0x45EE02D6DCF8992EL, + 0x3EA060225B3E1C1FL, + 0x24DBB6D02D5CC531L, + 0xE5E91A7340BF9382L, + 0x28975F86E2E2177FL, + 0x80E48374A6B42E85L, + 0xDF40392265BB4A66L, + 0x43750475A48C7023L, + 0x5648BD3E391C01D3L, + 0x9BE9E11AD1A6C369L, + 0x2E079CB8C1A11F50L, + 0xB2D538403F1020F1L, + 0x297518A4EF6AF5F1L, + 0xA8CE1B90167A6F8BL, + 0xB926B2FA50541BA9L, + 0xC46A2D3BD6925A35L, + 0x3071BC8E6C400487L, + 0x300D3885894BA47FL, + 0x840BFF3BEB7EEADDL, + 0xDC9E04DF744BDC0CL, + 0xBE01CF6841412C77L, + 0x6C55B2DC74B816A1L, + 0x4D4C63128A344F82L, + 0xC6227497E100B463L, + 0x53C9987705EA71C0L, + 0x3E355394668C3559L, + 0x05984B7D358B107AL, + 0x4D32FA1D79002A57L, + 0x910B0DAD1440EC24L, + 0x025BDE6A7BEBF320L, + 0x0D33817EF345D999L, + 0xBA0DE64B3F4DB34AL, + 0x54666461D0EB4FD7L, + 0x746ECFA92D1CAF81L, + 0x6E6A774ACD266DF2L, + 0x1A86161AE8E82A85L, + 0xFFF7C351A4CEC13DL, + 0xFFF05844F57498B8L, + 0x8DB71789127C6C13L, + 0x4A52ACF805F370ABL, + 0xFE13F90A1ACFBD58L, + 0x615730E301ED12E2L, + 0x1A2D4AA43B6C0103L }; + + long[] reference = hasUnsignedMultiplyHigh() ? 
referenceUnsignedMultiplyHigh : referenceMultiplyHigh; + + int offset = 0; + assertEquals(reference[offset++], T1ha1.hash(null, 0, 0, 0L)); // empty-zero + assertEquals(reference[offset++], T1ha1.hash(null, 0, 0, ~0L)); // empty-all1 + assertEquals(reference[offset++], T1ha1.hash(testPattern, 0, 64, 0L)); // bin64-zero + + long seed = 1; + for (int i = 1; i < 64; i++) { + assertEquals(reference[offset++], T1ha1.hash(testPattern, 0, i, seed)); // bin%i-1p%i + seed <<= 1; + } + + seed = ~0L; + for (int i = 1; i <= 7; i++) { + seed <<= 1; + assertEquals(reference[offset++], T1ha1.hash(testPattern, i, 64 - i, seed)); // align%i_F%i + } + + byte[] testPatternLong = new byte[512]; + for (int i = 0; i < testPatternLong.length; i++) { + testPatternLong[i] = (byte) i; + } + for (int i = 0; i <= 7; i++) { + assertEquals(reference[offset++], T1ha1.hash(testPatternLong, i, 128 + i * 17, seed)); // long-%05i + } + } + + @Override + public byte[] hash(byte[] input) { + long hash = T1ha1.hash(input, 0, input.length); + LONG_HANDLE.set(scratch, 0, hash); + return scratch; + } + + @Override + public int outputBits() { + return 64; + } + + private static boolean hasUnsignedMultiplyHigh() { + try { + MethodHandles.publicLookup() + .findStatic(Math.class, "unsignedMultiplyHigh", MethodType.methodType(long.class, long.class, long.class)); + return true; + } catch (NoSuchMethodException e) { + return false; + } catch (IllegalAccessException e) { + throw new RuntimeException(e); + } + } +} diff --git a/server/src/main/java/org/opensearch/common/util/BytesRefHash.java b/server/src/main/java/org/opensearch/common/util/BytesRefHash.java index ecc93d017beaf..4afba2905019a 100644 --- a/server/src/main/java/org/opensearch/common/util/BytesRefHash.java +++ b/server/src/main/java/org/opensearch/common/util/BytesRefHash.java @@ -33,152 +33,292 @@ package org.opensearch.common.util; import org.apache.lucene.util.BytesRef; +import org.opensearch.common.Numbers; +import org.opensearch.common.annotation.InternalApi; +import org.opensearch.common.hash.T1ha1; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; import org.opensearch.core.common.util.ByteArray; /** - * Specialized hash table implementation similar to Lucene's BytesRefHash that maps - * BytesRef values to ids. Collisions are resolved with open addressing and linear - * probing, growth is smooth thanks to {@link BigArrays}, hashes are cached for faster - * re-hashing and capacity is always a multiple of 2 for faster identification of buckets. - * This class is not thread-safe. + * Specialized hash table implementation that maps a {@link BytesRef} key to a long ordinal. * - * @opensearch.internal + *
<p>
+ * It uses a compact byte-packing strategy to encode the ordinal and fingerprint information + * in the hash table value. It makes lookups faster by short-circuiting expensive equality checks + * for keys that collide onto the same hash table slot. + * + *
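<p>
+ * A minimal usage sketch (the non-recycling allocator is chosen here only for illustration):
+ * <pre>{@code
+ * try (BytesRefHash hash = new BytesRefHash(BigArrays.NON_RECYCLING_INSTANCE)) {
+ *     long ordinal = hash.add(new BytesRef("key")); // >= 0: newly added
+ *     assert hash.add(new BytesRef("key")) == -1 - ordinal; // key already present
+ *     assert hash.find(new BytesRef("key")) == ordinal;
+ * }
+ * }</pre>
+ *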
<p>
+ * This class is not thread-safe. + * + * @opensearch.internal */ -public final class BytesRefHash extends AbstractHash { +@InternalApi +public final class BytesRefHash implements Releasable { + private static final long MAX_CAPACITY = 1L << 32; + private static final long DEFAULT_INITIAL_CAPACITY = 32; + private static final float DEFAULT_LOAD_FACTOR = 0.6f; + private static final Hasher DEFAULT_HASHER = key -> T1ha1.hash(key.bytes, key.offset, key.length); + + private static final long MASK_ORDINAL = 0x00000000FFFFFFFFL; // extract ordinal + private static final long MASK_FINGERPRINT = 0xFFFFFFFF00000000L; // extract fingerprint + + /** + * Maximum load factor after which the capacity is doubled. + */ + private final float loadFactor; + + /** + * Calculates the hash of a {@link BytesRef} key. + */ + private final Hasher hasher; + + /** + * Utility class to allocate recyclable arrays. + */ + private final BigArrays bigArrays; + + /** + * Reusable BytesRef to read keys. + */ + private final BytesRef scratch = new BytesRef(); + + /** + * Current capacity of the hash table. This must be a power of two so that the hash table slot + * can be identified quickly using bitmasks, thus avoiding expensive modulo or integer division. + */ + private long capacity; + + /** + * Bitmask to identify the hash table slot from a key's hash. + */ + private long mask; + + /** + * Size threshold after which the hash table needs to be doubled in capacity. + */ + private long grow; + + /** + * Current size of the hash table. + */ + private long size; + + /** + * Underlying array to store the hash table values. + * + *
<p>
+ * Each hash table value (64-bit) uses the following byte packing strategy: + *
<pre>
+     * |================================|================================|
+     * | Fingerprint                    | Ordinal                        |
+     * |--------------------------------|--------------------------------|
+     * | 32 bits                        | 32 bits                        |
+     * |================================|================================|
+     * </pre>
+ * + *
<p>
+ * This allows us to encode and manipulate additional information in the hash table + * itself without having to look elsewhere in memory, which would be much slower. + *
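<p>
+ * Packing and unpacking reduce to plain bitwise operations on the masks defined above, e.g.:
+ * <pre>{@code
+ * long packed = (hash & MASK_FINGERPRINT) | ordinal; // pack
+ * long ord = packed & MASK_ORDINAL; // unpack the ordinal
+ * long fp = packed & MASK_FINGERPRINT; // unpack the fingerprint
+ * }</pre>
+ *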
<p>
+ * Terminology: table[index] = value = (fingerprint | ordinal) + */ + private LongArray table; + + /** + * Underlying array to store the starting offsets of keys. + * + *
<p>
+ * Terminology: + *
<pre>
+     *   offsets[ordinal] = starting offset (inclusive)
+     *   offsets[ordinal + 1] = ending offset (exclusive)
+     * </pre>
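+ * <p>
+ * So the key stored at ordinal {@code k} spans {@code keys[offsets[k] ... offsets[k + 1])} and its
+ * length is {@code offsets[k + 1] - offsets[k]}, which is exactly how {@link #get} reads it back.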
+ */ + private LongArray offsets; + + /** + * Underlying byte array to store the keys. + * + *
<p>
+ * Terminology: keys[start...end] = key + */ + private ByteArray keys; - private LongArray startOffsets; - private ByteArray bytes; - private IntArray hashes; // we cache hashes for faster re-hashing - private final BytesRef spare; + /** + * Pre-computed hashes of the stored keys. + * It is used to speed up reinserts when doubling the capacity. + */ + private LongArray hashes; - // Constructor with configurable capacity and default maximum load factor. - public BytesRefHash(long capacity, BigArrays bigArrays) { - this(capacity, DEFAULT_MAX_LOAD_FACTOR, bigArrays); + public BytesRefHash(final BigArrays bigArrays) { + this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR, DEFAULT_HASHER, bigArrays); } - // Constructor with configurable capacity and load factor. - public BytesRefHash(long capacity, float maxLoadFactor, BigArrays bigArrays) { - super(capacity, maxLoadFactor, bigArrays); - startOffsets = bigArrays.newLongArray(capacity + 1, false); - startOffsets.set(0, 0); - bytes = bigArrays.newByteArray(capacity * 3, false); - hashes = bigArrays.newIntArray(capacity, false); - spare = new BytesRef(); + public BytesRefHash(final long initialCapacity, final BigArrays bigArrays) { + this(initialCapacity, DEFAULT_LOAD_FACTOR, DEFAULT_HASHER, bigArrays); } - // BytesRef has a weak hashCode function so we try to improve it by rehashing using Murmur3 - // Feel free to remove rehashing if BytesRef gets a better hash function - private static int rehash(int hash) { - return BitMixer.mix32(hash); + public BytesRefHash(final long initialCapacity, final float loadFactor, final BigArrays bigArrays) { + this(initialCapacity, loadFactor, DEFAULT_HASHER, bigArrays); } - /** - * Return the key at 0 <= index <= capacity(). The result is undefined if the slot is unused. - *
<p>Beware that the content of the {@link BytesRef} may become invalid as soon as {@link #close()} is called</p>
- */ - public BytesRef get(long id, BytesRef dest) { - final long startOffset = startOffsets.get(id); - final int length = (int) (startOffsets.get(id + 1) - startOffset); - bytes.get(startOffset, length, dest); - return dest; + public BytesRefHash(final long initialCapacity, final float loadFactor, final Hasher hasher, final BigArrays bigArrays) { + assert initialCapacity > 0 : "initial capacity must be greater than 0"; + assert loadFactor > 0 && loadFactor < 1 : "load factor must be between 0 and 1"; + + this.loadFactor = loadFactor; + this.hasher = hasher; + this.bigArrays = bigArrays; + + capacity = Numbers.nextPowerOfTwo((long) (initialCapacity / loadFactor)); + assert capacity <= MAX_CAPACITY : "required capacity too large"; + mask = capacity - 1; + size = 0; + grow = (long) (capacity * loadFactor); + + table = bigArrays.newLongArray(capacity, false); + table.fill(0, capacity, -1); + offsets = bigArrays.newLongArray(initialCapacity + 1, false); + offsets.set(0, 0); + keys = bigArrays.newByteArray(initialCapacity * 3, false); + hashes = bigArrays.newLongArray(initialCapacity, false); } /** - * Get the id associated with key + * Adds the given key to the hash table and returns its ordinal. + * If the key exists already, it returns (-1 - ordinal). */ - public long find(BytesRef key, int code) { - final long slot = slot(rehash(code), mask); - for (long index = slot;; index = nextSlot(index, mask)) { - final long id = id(index); - if (id == -1L || key.bytesEquals(get(id, spare))) { - return id; + public long add(final BytesRef key) { + final long hash = hasher.hash(key); + final long fingerprint = hash & MASK_FINGERPRINT; + + for (long idx = hash & mask, value, ordinal;; idx = (idx + 1) & mask) { + if ((value = table.get(idx)) == -1) { + final long val = fingerprint | size; + if (size >= grow) { + growAndInsert(hash, val); + } else { + table.set(idx, val); + } + return append(key, hash); + } else if (((value & MASK_FINGERPRINT) == fingerprint) && key.bytesEquals(get(ordinal = (value & MASK_ORDINAL), scratch))) { + return -1 - ordinal; } } } - /** Sugar for {@link #find(BytesRef, int) find(key, key.hashCode()} */ - public long find(BytesRef key) { - return find(key, key.hashCode()); - } + /** + * Returns the ordinal associated with the given key, or -1 if the key doesn't exist. + * + *
<p>
+ * Using the 64-bit hash value, up to 32 least significant bits (LSB) are used to identify the + * home slot in the hash table, and an additional 32 bits are used to identify the fingerprint. + * The fingerprint further increases the entropy and reduces the number of false lookups in the + * keys' table, each of which would require an expensive equality check. + *
<p>
+ * Total entropy bits = 32 + log2(capacity) + * + *
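<p>
+ * For example, at {@code capacity = 1 << 20}, a colliding key must match on 32 + 20 = 52 bits
+ * before the expensive equality check against the keys' table is even attempted.
+ *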
<p>
+ * Linear probing starts from the home slot, until a match or an empty slot is found. + * Values are first checked using their fingerprint (to reduce false positives), then verified + * in the keys' table using an equality check. + */ + public long find(final BytesRef key) { + final long hash = hasher.hash(key); + final long fingerprint = hash & MASK_FINGERPRINT; - private long set(BytesRef key, int code, long id) { - assert rehash(key.hashCode()) == code; - assert size < maxSize; - final long slot = slot(code, mask); - for (long index = slot;; index = nextSlot(index, mask)) { - final long curId = id(index); - if (curId == -1) { // means unset - id(index, id); - append(id, key, code); - ++size; - return id; - } else if (key.bytesEquals(get(curId, spare))) { - return -1 - curId; + for (long idx = hash & mask, value, ordinal;; idx = (idx + 1) & mask) { + if ((value = table.get(idx)) == -1) { + return -1; + } else if (((value & MASK_FINGERPRINT) == fingerprint) && key.bytesEquals(get(ordinal = (value & MASK_ORDINAL), scratch))) { + return ordinal; } } } - private void append(long id, BytesRef key, int code) { - assert size == id; - final long startOffset = startOffsets.get(size); - bytes = bigArrays.grow(bytes, startOffset + key.length); - bytes.set(startOffset, key.bytes, key.offset, key.length); - startOffsets = bigArrays.grow(startOffsets, size + 2); - startOffsets.set(size + 1, startOffset + key.length); - hashes = bigArrays.grow(hashes, id + 1); - hashes.set(id, code); + /** + * Returns the key associated with the given ordinal. + * The result is undefined for an unused ordinal. + * + *
<p>
+ * Beware that the content of the {@link BytesRef} may become invalid as soon as {@link #close()} is called + */ + public BytesRef get(final long ordinal, final BytesRef dest) { + final long start = offsets.get(ordinal); + final int length = (int) (offsets.get(ordinal + 1) - start); + keys.get(start, length, dest); + return dest; } - private boolean assertConsistent(long id, int code) { - get(id, spare); - return rehash(spare.hashCode()) == code; + /** + * Returns the number of mappings in this hash table. + */ + public long size() { + return size; } - private void reset(int code, long id) { - assert assertConsistent(id, code); - final long slot = slot(code, mask); - for (long index = slot;; index = nextSlot(index, mask)) { - final long curId = id(index); - if (curId == -1) { // means unset - id(index, id); - break; - } - } + /** + * Appends the key in the keys' and offsets' tables. + */ + private long append(final BytesRef key, final long hash) { + final long start = offsets.get(size); + final long end = start + key.length; + offsets = bigArrays.grow(offsets, size + 2); + offsets.set(size + 1, end); + keys = bigArrays.grow(keys, end); + keys.set(start, key.bytes, key.offset, key.length); + hashes = bigArrays.grow(hashes, size + 1); + hashes.set(size, hash); + return size++; } /** - * Try to add key. Return its newly allocated id if it wasn't in the hash table yet, or -1-id - * if it was already present in the hash table. + * Grows the hash table by doubling its capacity, inserting the provided value, + * and reinserting the previous values at their updated slots. */ - public long add(BytesRef key, int code) { - if (size >= maxSize) { - assert size == maxSize; - grow(); - } - assert size < maxSize; - return set(key, rehash(code), size); - } + private void growAndInsert(final long hash, final long value) { + // Ensure that the hash table doesn't grow too large. + // This implicitly also ensures that the ordinals are no larger than 2^32, thus, + // preventing them from polluting the fingerprint bits in the hash table values. + assert capacity < MAX_CAPACITY : "hash table already at the max capacity"; + + capacity <<= 1; + mask = capacity - 1; + grow = (long) (capacity * loadFactor); + table = bigArrays.grow(table, capacity); + table.fill(0, capacity, -1); + table.set(hash & mask, value); - /** Sugar to {@link #add(BytesRef, int) add(key, key.hashCode()}. */ - public long add(BytesRef key) { - return add(key, key.hashCode()); + for (long ordinal = 0; ordinal < size; ordinal++) { + reinsert(ordinal, hashes.get(ordinal)); + } } - @Override - protected void removeAndAdd(long index) { - final long id = id(index, -1); - assert id >= 0; - final int code = hashes.get(id); - reset(code, id); + /** + * Reinserts the hash table value for an existing key stored at the given ordinal. + */ + private void reinsert(final long ordinal, final long hash) { + for (long idx = hash & mask;; idx = (idx + 1) & mask) { + if (table.get(idx) == -1) { + table.set(idx, (hash & MASK_FINGERPRINT) | ordinal); + return; + } + } } @Override public void close() { - try (Releasable releasable = Releasables.wrap(bytes, hashes, startOffsets)) { - super.close(); - } + Releasables.close(table, offsets, keys, hashes); } + /** + * Hasher calculates the hash of a {@link BytesRef} key. 
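+ * <p>
+ * For example, a seeded hasher can be supplied as a lambda, much like the tests below do:
+ * <pre>{@code
+ * long seed = 42L; // illustrative; the tests use a random seed
+ * Hasher hasher = key -> T1ha1.hash(key.bytes, key.offset, key.length, seed);
+ * }</pre>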
+ */ + @FunctionalInterface + public interface Hasher { + long hash(BytesRef key); + } } diff --git a/server/src/main/java/org/opensearch/common/util/ReorganizingLongHash.java b/server/src/main/java/org/opensearch/common/util/ReorganizingLongHash.java index 417eb6a316d86..86e7227cb6c85 100644 --- a/server/src/main/java/org/opensearch/common/util/ReorganizingLongHash.java +++ b/server/src/main/java/org/opensearch/common/util/ReorganizingLongHash.java @@ -8,7 +8,10 @@ package org.opensearch.common.util; +import org.opensearch.common.Numbers; +import org.opensearch.common.annotation.InternalApi; import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lease.Releasables; /** * Specialized hash table implementation that maps a (primitive) long to long. @@ -24,6 +27,7 @@ * * @opensearch.internal */ +@InternalApi public class ReorganizingLongHash implements Releasable { private static final long MAX_CAPACITY = 1L << 32; private static final long DEFAULT_INITIAL_CAPACITY = 32; @@ -109,7 +113,8 @@ public ReorganizingLongHash(final long initialCapacity, final float loadFactor, this.bigArrays = bigArrays; this.loadFactor = loadFactor; - capacity = nextPowerOfTwo((long) (initialCapacity / loadFactor)); + capacity = Numbers.nextPowerOfTwo((long) (initialCapacity / loadFactor)); + assert capacity <= MAX_CAPACITY : "required capacity too large"; mask = capacity - 1; grow = (long) (capacity * loadFactor); size = 0; @@ -296,11 +301,6 @@ private void grow() { @Override public void close() { - table.close(); - keys.close(); - } - - private static long nextPowerOfTwo(final long value) { - return Math.max(1, Long.highestOneBit(value - 1) << 1); + Releasables.close(table, keys); } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/BytesKeyedBucketOrds.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/BytesKeyedBucketOrds.java index 0eb23013d1e47..5d7c5c2976169 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/BytesKeyedBucketOrds.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/BytesKeyedBucketOrds.java @@ -131,7 +131,7 @@ private static class FromSingle extends BytesKeyedBucketOrds { private final BytesRefHash ords; private FromSingle(BigArrays bigArrays) { - ords = new BytesRefHash(1, bigArrays); + ords = new BytesRefHash(bigArrays); } @Override @@ -190,7 +190,7 @@ private static class FromMany extends BytesKeyedBucketOrds { private final LongKeyedBucketOrds longToBucketOrds; private FromMany(BigArrays bigArrays) { - bytesToLong = new BytesRefHash(1, bigArrays); + bytesToLong = new BytesRefHash(bigArrays); longToBucketOrds = LongKeyedBucketOrds.build(bigArrays, CardinalityUpperBound.MANY); } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificanceLookup.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificanceLookup.java index aee4caa67afa1..34bbac55900a8 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificanceLookup.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificanceLookup.java @@ -123,7 +123,7 @@ public void close() {} }; } return new BackgroundFrequencyForBytes() { - private final BytesRefHash termToPosition = new BytesRefHash(1, bigArrays); + private final BytesRefHash termToPosition = new BytesRefHash(bigArrays); private LongArray positionToFreq = bigArrays.newLongArray(1, false); @Override diff --git 
a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/StringRareTermsAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/StringRareTermsAggregator.java index c796faa6a8b76..cc35fe75e5e92 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/StringRareTermsAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/StringRareTermsAggregator.java @@ -135,7 +135,7 @@ public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws I Arrays.fill(mergeMap, -1); long offset = 0; for (int owningOrdIdx = 0; owningOrdIdx < owningBucketOrds.length; owningOrdIdx++) { - try (BytesRefHash bucketsInThisOwningBucketToCollect = new BytesRefHash(1, context.bigArrays())) { + try (BytesRefHash bucketsInThisOwningBucketToCollect = new BytesRefHash(context.bigArrays())) { filters[owningOrdIdx] = newFilter(); List builtBuckets = new ArrayList<>(); BytesKeyedBucketOrds.BucketOrdsEnum collectedBuckets = bucketOrds.ordsEnum(owningBucketOrds[owningOrdIdx]); diff --git a/server/src/test/java/org/opensearch/common/NumbersTests.java b/server/src/test/java/org/opensearch/common/NumbersTests.java index 5fb85d815ded2..7990ba74f162a 100644 --- a/server/src/test/java/org/opensearch/common/NumbersTests.java +++ b/server/src/test/java/org/opensearch/common/NumbersTests.java @@ -221,4 +221,25 @@ public void testToUnsignedBigInteger() { assertEquals(random, Numbers.toUnsignedBigInteger(random.longValue())); assertEquals(Numbers.MAX_UNSIGNED_LONG_VALUE, Numbers.toUnsignedBigInteger(Numbers.MAX_UNSIGNED_LONG_VALUE.longValue())); } + + public void testNextPowerOfTwo() { + // Negative values: + for (int i = 0; i < 1000; i++) { + long value = randomLongBetween(-500000, -1); + assertEquals(1, Numbers.nextPowerOfTwo(value)); + } + + // Zero value: + assertEquals(1, Numbers.nextPowerOfTwo(0L)); + + // Positive values: + for (int i = 0; i < 1000; i++) { + long value = randomLongBetween(1, 500000); + long nextPowerOfTwo = Numbers.nextPowerOfTwo(value); + + assertTrue(nextPowerOfTwo > value); // must be strictly greater + assertTrue((nextPowerOfTwo >>> 1) <= value); // must be greater by no more than one power of two + assertEquals(0, nextPowerOfTwo & (nextPowerOfTwo - 1)); // must be a power of two + } + } } diff --git a/server/src/test/java/org/opensearch/common/util/BytesRefHashTests.java b/server/src/test/java/org/opensearch/common/util/BytesRefHashTests.java index a78a35e5a2412..adcec8f07f702 100644 --- a/server/src/test/java/org/opensearch/common/util/BytesRefHashTests.java +++ b/server/src/test/java/org/opensearch/common/util/BytesRefHashTests.java @@ -35,6 +35,7 @@ import org.apache.lucene.tests.util.TestUtil; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; +import org.opensearch.common.hash.T1ha1; import org.opensearch.common.settings.Settings; import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.test.OpenSearchTestCase; @@ -44,6 +45,7 @@ import java.util.Map; import java.util.Map.Entry; import java.util.Set; +import java.util.stream.Stream; public class BytesRefHashTests extends OpenSearchTestCase { @@ -57,9 +59,13 @@ private void newHash() { if (hash != null) { hash.close(); } - // Test high load factors to make sure that collision resolution works fine - final float maxLoadFactor = 0.6f + randomFloat() * 0.39f; - hash = new BytesRefHash(randomIntBetween(0, 100), maxLoadFactor, randomBigArrays()); + long seed = randomLong(); + 
hash = new BytesRefHash( + randomIntBetween(1, 100), // random capacity + 0.6f + randomFloat() * 0.39f, // random load factor to verify collision resolution + key -> T1ha1.hash(key.bytes, key.offset, key.length, seed), + randomBigArrays() + ); } @Override @@ -68,39 +74,34 @@ public void setUp() throws Exception { newHash(); } - public void testDuel() { - final int len = randomIntBetween(1, 100000); - final BytesRef[] values = new BytesRef[len]; - for (int i = 0; i < values.length; ++i) { - values[i] = new BytesRef(randomAlphaOfLength(5)); - } - final Map valueToId = new HashMap<>(); - final BytesRef[] idToValue = new BytesRef[values.length]; - final int iters = randomInt(1000000); - for (int i = 0; i < iters; ++i) { - final BytesRef value = randomFrom(values); - if (valueToId.containsKey(value)) { - assertEquals(-1 - valueToId.get(value), hash.add(value, value.hashCode())); + public void testFuzzy() { + Map reference = new HashMap<>(); + BytesRef[] keys = Stream.generate(() -> new BytesRef(randomAlphaOfLength(20))) + .limit(randomIntBetween(1000, 2000)) + .toArray(BytesRef[]::new); + + // Verify the behaviour of "add" and "find". + for (int i = 0; i < keys.length * 10; i++) { + BytesRef key = keys[i % keys.length]; + if (reference.containsKey(key)) { + long expectedOrdinal = reference.get(key); + assertEquals(-1 - expectedOrdinal, hash.add(key)); + assertEquals(expectedOrdinal, hash.find(key)); } else { - assertEquals(valueToId.size(), hash.add(value, value.hashCode())); - idToValue[valueToId.size()] = value; - valueToId.put(value, valueToId.size()); + assertEquals(-1, hash.find(key)); + reference.put(key, (long) reference.size()); + assertEquals((long) reference.get(key), hash.add(key)); } } - assertEquals(valueToId.size(), hash.size()); - for (final var next : valueToId.entrySet()) { - assertEquals(next.getValue().longValue(), hash.find(next.getKey(), next.getKey().hashCode())); + // Verify the behaviour of "get". + BytesRef scratch = new BytesRef(); + for (Map.Entry entry : reference.entrySet()) { + assertEquals(entry.getKey(), hash.get(entry.getValue(), scratch)); } - for (long i = 0; i < hash.capacity(); ++i) { - final long id = hash.id(i); - BytesRef spare = new BytesRef(); - if (id >= 0) { - hash.get(id, spare); - assertEquals(idToValue[(int) id], spare); - } - } + // Verify the behaviour of "size". + assertEquals(reference.size(), hash.size()); hash.close(); } diff --git a/test/framework/src/main/java/org/opensearch/common/hash/AvalancheStats.java b/test/framework/src/main/java/org/opensearch/common/hash/AvalancheStats.java new file mode 100644 index 0000000000000..c1600abcacd3e --- /dev/null +++ b/test/framework/src/main/java/org/opensearch/common/hash/AvalancheStats.java @@ -0,0 +1,63 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.hash; + +import java.util.Locale; + +/** + * Represents the avalanche statistics of a hash function. 
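+ * <p>
+ * For each (input bit, output bit) pair, {@code flips[i][o] / iterations} estimates the probability
+ * that flipping input bit {@code i} flips output bit {@code o}; an ideal hash keeps every such
+ * probability at 0.5, so each per-pair bias {@code 2 * p - 1} should average out to zero.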
+ */ +public class AvalancheStats { + private final int inputBits; + private final int outputBits; + private final double bias; + private final double sumOfSquaredErrors; + + public AvalancheStats(int[][] flips, int iterations) { + this.inputBits = flips.length; + this.outputBits = flips[0].length; + double sumOfBiases = 0; + double sumOfSquaredErrors = 0; + + for (int i = 0; i < inputBits; i++) { + for (int o = 0; o < outputBits; o++) { + sumOfSquaredErrors += Math.pow(0.5 - ((double) flips[i][o] / iterations), 2); + sumOfBiases += 2 * ((double) flips[i][o] / iterations) - 1; + } + } + + this.bias = Math.abs(sumOfBiases / (inputBits * outputBits)); + this.sumOfSquaredErrors = sumOfSquaredErrors; + } + + public double bias() { + return bias; + } + + public double diffusion() { + return 1 - bias; + } + + public double sumOfSquaredErrors() { + return sumOfSquaredErrors; + } + + @Override + public String toString() { + return String.format( + Locale.ROOT, + "AvalancheStats{inputBits=%d, outputBits=%d, bias=%.4f%%, diffusion=%.4f%%, sumOfSquaredErrors=%.2f}", + inputBits, + outputBits, + bias() * 100, + diffusion() * 100, + sumOfSquaredErrors() + ); + } +} diff --git a/test/framework/src/main/java/org/opensearch/common/hash/HashFunctionTestCase.java b/test/framework/src/main/java/org/opensearch/common/hash/HashFunctionTestCase.java new file mode 100644 index 0000000000000..e272fe0962047 --- /dev/null +++ b/test/framework/src/main/java/org/opensearch/common/hash/HashFunctionTestCase.java @@ -0,0 +1,79 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.hash; + +import org.opensearch.common.Randomness; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Arrays; +import java.util.Random; + +/** + * Base class for testing the quality of hash functions. + */ +public abstract class HashFunctionTestCase extends OpenSearchTestCase { + private static final int[] INPUT_BITS = new int[] { 24, 32, 40, 48, 56, 64, 72, 80, 96, 112, 128, 160, 512, 1024 }; + private static final int ITERATIONS = 1000; + private static final double BIAS_THRESHOLD = 0.01; // 1% + + public abstract byte[] hash(byte[] input); + + public abstract int outputBits(); + + /** + * Tests if the hash function shows an avalanche effect, i.e, flipping a single input bit + * should flip half the output bits. 
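+ * <p>
+ * Concretely, each round flips one input bit at a time, rehashes, and records which output bits
+ * changed; over {@code ITERATIONS} random inputs, every flip frequency should stay close to 0.5.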
+ */ + public void testAvalanche() { + for (int inputBits : INPUT_BITS) { + AvalancheStats stats = simulate(inputBits); + if (stats.bias() >= BIAS_THRESHOLD) { + fail("bias exceeds threshold: " + stats); + } + } + } + + private AvalancheStats simulate(int inputBits) { + int outputBits = outputBits(); + assert inputBits % 8 == 0; // using full bytes for simplicity + assert outputBits % 8 == 0; // using full bytes for simplicity + byte[] input = new byte[inputBits >>> 3]; + Random random = Randomness.get(); + int[][] flips = new int[inputBits][outputBits]; + + for (int iter = 0; iter < ITERATIONS; iter++) { + random.nextBytes(input); + byte[] hash = Arrays.copyOf(hash(input), outputBits >>> 3); // copying since the underlying byte-array is reused + + for (int i = 0; i < inputBits; i++) { + flipBit(input, i); // flip one bit + byte[] newHash = hash(input); // recompute the hash; half the bits should have flipped + flipBit(input, i); // return to original + + for (int o = 0; o < outputBits; o++) { + flips[i][o] += getBit(hash, o) ^ getBit(newHash, o); + } + } + } + + return new AvalancheStats(flips, ITERATIONS); + } + + private static void flipBit(byte[] input, int position) { + int offset = position / 8; + int bit = position & 7; + input[offset] ^= (1 << bit); + } + + private static int getBit(byte[] input, int position) { + int offset = position / 8; + int bit = position & 7; + return (input[offset] >>> bit) & 1; + } +} diff --git a/test/framework/src/test/java/org/opensearch/common/hash/HashFunctionTestCaseTests.java b/test/framework/src/test/java/org/opensearch/common/hash/HashFunctionTestCaseTests.java new file mode 100644 index 0000000000000..d5fdaf10999fc --- /dev/null +++ b/test/framework/src/test/java/org/opensearch/common/hash/HashFunctionTestCaseTests.java @@ -0,0 +1,68 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.hash; + +import org.apache.lucene.util.StringHelper; +import org.opensearch.test.OpenSearchTestCase; + +import java.lang.invoke.MethodHandles; +import java.lang.invoke.VarHandle; +import java.nio.ByteOrder; +import java.util.Arrays; + +public class HashFunctionTestCaseTests extends OpenSearchTestCase { + private static final VarHandle INT_HANDLE = MethodHandles.byteArrayViewVarHandle(int[].class, ByteOrder.LITTLE_ENDIAN); + + /** + * Asserts the positive case where a hash function passes the avalanche test. + */ + public void testStrongHashFunction() { + HashFunctionTestCase murmur3 = new HashFunctionTestCase() { + private final byte[] scratch = new byte[4]; + + @Override + public byte[] hash(byte[] input) { + int hash = StringHelper.murmurhash3_x86_32(input, 0, input.length, StringHelper.GOOD_FAST_HASH_SEED); + INT_HANDLE.set(scratch, 0, hash); + return scratch; + } + + @Override + public int outputBits() { + return 32; + } + }; + + murmur3.testAvalanche(); + } + + /** + * Asserts the negative case where a hash function fails the avalanche test. 
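+ * <p>
+ * {@code Arrays.hashCode} is a good negative example: it is linear in its input
+ * ({@code h = 31 * h + b[i]}), so flipping a single input bit perturbs the output in a highly
+ * predictable, biased way.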
+ */ + public void testWeakHashFunction() { + HashFunctionTestCase arraysHashCode = new HashFunctionTestCase() { + private final byte[] scratch = new byte[4]; + + @Override + public byte[] hash(byte[] input) { + int hash = Arrays.hashCode(input); + INT_HANDLE.set(scratch, 0, hash); + return scratch; + } + + @Override + public int outputBits() { + return 32; + } + }; + + AssertionError ex = expectThrows(AssertionError.class, arraysHashCode::testAvalanche); + assertTrue(ex.getMessage().contains("bias exceeds threshold")); + } +} From 798cc0cd4b69b014bec32700cf508533f5bfa42b Mon Sep 17 00:00:00 2001 From: panguixin Date: Sat, 26 Aug 2023 01:19:28 +0800 Subject: [PATCH 29/30] Fix test DeletePitMultiNodeIT.testDeleteWhileSearch (#9482) Signed-off-by: panguixin --- .../search/pit/DeletePitMultiNodeIT.java | 23 ++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/pit/DeletePitMultiNodeIT.java b/server/src/internalClusterTest/java/org/opensearch/search/pit/DeletePitMultiNodeIT.java index 799996d4b97dc..43b7179a335f8 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/pit/DeletePitMultiNodeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/pit/DeletePitMultiNodeIT.java @@ -8,6 +8,7 @@ package org.opensearch.search.pit; +import org.opensearch.ExceptionsHelper; import org.opensearch.action.LatchedActionListener; import org.opensearch.action.admin.indices.stats.IndicesStatsRequest; import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; @@ -18,10 +19,14 @@ import org.opensearch.action.search.DeletePitInfo; import org.opensearch.action.search.DeletePitRequest; import org.opensearch.action.search.DeletePitResponse; +import org.opensearch.action.search.SearchPhaseExecutionException; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.action.search.ShardSearchFailure; import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.action.ActionListener; +import org.opensearch.search.SearchContextMissingException; import org.opensearch.search.builder.PointInTimeBuilder; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; @@ -263,18 +268,23 @@ public void testDeleteWhileSearch() throws Exception { try { latch.await(); for (int j = 0; j < 30; j++) { - client().prepareSearch() + SearchResponse searchResponse = client().prepareSearch() .setSize(2) .setPointInTime(new PointInTimeBuilder(pitResponse.getId()).setKeepAlive(TimeValue.timeValueDays(1))) .execute() .get(); + if (searchResponse.getFailedShards() != 0) { + verifySearchContextMissingException(searchResponse.getShardFailures()); + } } } catch (Exception e) { /** * assert for the expected exception once delete pit goes through; throw an error in case of any exception before that. 
*/ if (deleted.get() == true) { - if (!e.getMessage().contains("all shards failed")) throw new AssertionError(e); + Throwable t = ExceptionsHelper.unwrapCause(e.getCause()); + assertTrue(e.toString(), t instanceof SearchPhaseExecutionException); + verifySearchContextMissingException(((SearchPhaseExecutionException) t).shardFailures()); return; } throw new AssertionError(e); @@ -283,9 +293,9 @@ public void testDeleteWhileSearch() throws Exception { threads[i].setName("opensearch[node_s_0][search]"); threads[i].start(); } + deleted.set(true); ActionFuture execute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); DeletePitResponse deletePITResponse = execute.get(); - deleted.set(true); for (DeletePitInfo deletePitInfo : deletePITResponse.getDeletePitResults()) { assertTrue(pitIds.contains(deletePitInfo.getPitId())); assertTrue(deletePitInfo.isSuccessful()); @@ -296,6 +306,13 @@ public void testDeleteWhileSearch() throws Exception { } } + private void verifySearchContextMissingException(ShardSearchFailure[] failures) { + for (ShardSearchFailure failure : failures) { + Throwable cause = ExceptionsHelper.unwrapCause(failure.getCause()); + assertTrue(failure.toString(), cause instanceof SearchContextMissingException); + } + } + public void testtConcurrentDeletes() throws InterruptedException, ExecutionException { CreatePitResponse pitResponse = createPitOnIndex("index"); ensureGreen(); From 8cfde6ca4031047b4f7e5ecf706e434e1c535567 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Fri, 25 Aug 2023 13:20:51 -0400 Subject: [PATCH 30/30] Bump netty from 4.1.96.Final to 4.1.97.Final (#9553) Signed-off-by: Andriy Redko --- CHANGELOG.md | 1 + buildSrc/version.properties | 2 +- .../licenses/netty-buffer-4.1.96.Final.jar.sha1 | 1 - .../licenses/netty-buffer-4.1.97.Final.jar.sha1 | 1 + .../transport-netty4/licenses/netty-codec-4.1.96.Final.jar.sha1 | 1 - .../transport-netty4/licenses/netty-codec-4.1.97.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.96.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.97.Final.jar.sha1 | 1 + .../licenses/netty-codec-http2-4.1.96.Final.jar.sha1 | 1 - .../licenses/netty-codec-http2-4.1.97.Final.jar.sha1 | 1 + .../licenses/netty-common-4.1.96.Final.jar.sha1 | 1 - .../licenses/netty-common-4.1.97.Final.jar.sha1 | 1 + .../licenses/netty-handler-4.1.96.Final.jar.sha1 | 1 - .../licenses/netty-handler-4.1.97.Final.jar.sha1 | 1 + .../licenses/netty-resolver-4.1.96.Final.jar.sha1 | 1 - .../licenses/netty-resolver-4.1.97.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.96.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.97.Final.jar.sha1 | 1 + .../netty-transport-native-unix-common-4.1.96.Final.jar.sha1 | 1 - .../netty-transport-native-unix-common-4.1.97.Final.jar.sha1 | 1 + .../licenses/netty-codec-dns-4.1.96.Final.jar.sha1 | 1 - .../licenses/netty-codec-dns-4.1.97.Final.jar.sha1 | 1 + .../licenses/netty-codec-http2-4.1.96.Final.jar.sha1 | 1 - .../licenses/netty-codec-http2-4.1.97.Final.jar.sha1 | 1 + .../licenses/netty-codec-socks-4.1.96.Final.jar.sha1 | 1 - .../licenses/netty-codec-socks-4.1.97.Final.jar.sha1 | 1 + .../licenses/netty-handler-proxy-4.1.96.Final.jar.sha1 | 1 - .../licenses/netty-handler-proxy-4.1.97.Final.jar.sha1 | 1 + .../licenses/netty-resolver-dns-4.1.96.Final.jar.sha1 | 1 - .../licenses/netty-resolver-dns-4.1.97.Final.jar.sha1 | 1 + .../netty-transport-native-unix-common-4.1.96.Final.jar.sha1 | 1 - .../netty-transport-native-unix-common-4.1.97.Final.jar.sha1 | 1 + .../repository-hdfs/licenses/netty-all-4.1.96.Final.jar.sha1 | 
1 - .../repository-hdfs/licenses/netty-all-4.1.97.Final.jar.sha1 | 1 + .../repository-s3/licenses/netty-buffer-4.1.96.Final.jar.sha1 | 1 - .../repository-s3/licenses/netty-buffer-4.1.97.Final.jar.sha1 | 1 + .../repository-s3/licenses/netty-codec-4.1.96.Final.jar.sha1 | 1 - .../repository-s3/licenses/netty-codec-4.1.97.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.96.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.97.Final.jar.sha1 | 1 + .../licenses/netty-codec-http2-4.1.96.Final.jar.sha1 | 1 - .../licenses/netty-codec-http2-4.1.97.Final.jar.sha1 | 1 + .../repository-s3/licenses/netty-common-4.1.96.Final.jar.sha1 | 1 - .../repository-s3/licenses/netty-common-4.1.97.Final.jar.sha1 | 1 + .../repository-s3/licenses/netty-handler-4.1.96.Final.jar.sha1 | 1 - .../repository-s3/licenses/netty-handler-4.1.97.Final.jar.sha1 | 1 + .../repository-s3/licenses/netty-resolver-4.1.96.Final.jar.sha1 | 1 - .../repository-s3/licenses/netty-resolver-4.1.97.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.96.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.97.Final.jar.sha1 | 1 + .../netty-transport-classes-epoll-4.1.96.Final.jar.sha1 | 1 - .../netty-transport-classes-epoll-4.1.97.Final.jar.sha1 | 1 + .../netty-transport-native-unix-common-4.1.96.Final.jar.sha1 | 1 - .../netty-transport-native-unix-common-4.1.97.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-buffer-4.1.96.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-buffer-4.1.97.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-codec-4.1.96.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-codec-4.1.97.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.96.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.97.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-common-4.1.96.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-common-4.1.97.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-handler-4.1.96.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-handler-4.1.97.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-resolver-4.1.96.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-resolver-4.1.97.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.96.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.97.Final.jar.sha1 | 1 + 68 files changed, 35 insertions(+), 34 deletions(-) delete mode 100644 modules/transport-netty4/licenses/netty-buffer-4.1.96.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-buffer-4.1.97.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-4.1.96.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-4.1.97.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-http-4.1.96.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-http-4.1.97.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-http2-4.1.96.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-http2-4.1.97.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-common-4.1.96.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-common-4.1.97.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-handler-4.1.96.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-handler-4.1.97.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-resolver-4.1.96.Final.jar.sha1 create mode 100644 
modules/transport-netty4/licenses/netty-resolver-4.1.97.Final.jar.sha1
 delete mode 100644 modules/transport-netty4/licenses/netty-transport-4.1.96.Final.jar.sha1
 create mode 100644 modules/transport-netty4/licenses/netty-transport-4.1.97.Final.jar.sha1
 delete mode 100644 modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.96.Final.jar.sha1
 create mode 100644 modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1
 delete mode 100644 plugins/repository-azure/licenses/netty-codec-dns-4.1.96.Final.jar.sha1
 create mode 100644 plugins/repository-azure/licenses/netty-codec-dns-4.1.97.Final.jar.sha1
 delete mode 100644 plugins/repository-azure/licenses/netty-codec-http2-4.1.96.Final.jar.sha1
 create mode 100644 plugins/repository-azure/licenses/netty-codec-http2-4.1.97.Final.jar.sha1
 delete mode 100644 plugins/repository-azure/licenses/netty-codec-socks-4.1.96.Final.jar.sha1
 create mode 100644 plugins/repository-azure/licenses/netty-codec-socks-4.1.97.Final.jar.sha1
 delete mode 100644 plugins/repository-azure/licenses/netty-handler-proxy-4.1.96.Final.jar.sha1
 create mode 100644 plugins/repository-azure/licenses/netty-handler-proxy-4.1.97.Final.jar.sha1
 delete mode 100644 plugins/repository-azure/licenses/netty-resolver-dns-4.1.96.Final.jar.sha1
 create mode 100644 plugins/repository-azure/licenses/netty-resolver-dns-4.1.97.Final.jar.sha1
 delete mode 100644 plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.96.Final.jar.sha1
 create mode 100644 plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1
 delete mode 100644 plugins/repository-hdfs/licenses/netty-all-4.1.96.Final.jar.sha1
 create mode 100644 plugins/repository-hdfs/licenses/netty-all-4.1.97.Final.jar.sha1
 delete mode 100644 plugins/repository-s3/licenses/netty-buffer-4.1.96.Final.jar.sha1
 create mode 100644 plugins/repository-s3/licenses/netty-buffer-4.1.97.Final.jar.sha1
 delete mode 100644 plugins/repository-s3/licenses/netty-codec-4.1.96.Final.jar.sha1
 create mode 100644 plugins/repository-s3/licenses/netty-codec-4.1.97.Final.jar.sha1
 delete mode 100644 plugins/repository-s3/licenses/netty-codec-http-4.1.96.Final.jar.sha1
 create mode 100644 plugins/repository-s3/licenses/netty-codec-http-4.1.97.Final.jar.sha1
 delete mode 100644 plugins/repository-s3/licenses/netty-codec-http2-4.1.96.Final.jar.sha1
 create mode 100644 plugins/repository-s3/licenses/netty-codec-http2-4.1.97.Final.jar.sha1
 delete mode 100644 plugins/repository-s3/licenses/netty-common-4.1.96.Final.jar.sha1
 create mode 100644 plugins/repository-s3/licenses/netty-common-4.1.97.Final.jar.sha1
 delete mode 100644 plugins/repository-s3/licenses/netty-handler-4.1.96.Final.jar.sha1
 create mode 100644 plugins/repository-s3/licenses/netty-handler-4.1.97.Final.jar.sha1
 delete mode 100644 plugins/repository-s3/licenses/netty-resolver-4.1.96.Final.jar.sha1
 create mode 100644 plugins/repository-s3/licenses/netty-resolver-4.1.97.Final.jar.sha1
 delete mode 100644 plugins/repository-s3/licenses/netty-transport-4.1.96.Final.jar.sha1
 create mode 100644 plugins/repository-s3/licenses/netty-transport-4.1.97.Final.jar.sha1
 delete mode 100644 plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.96.Final.jar.sha1
 create mode 100644 plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.97.Final.jar.sha1
 delete mode 100644 plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.96.Final.jar.sha1
 create mode 100644 plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1
 delete mode 100644 plugins/transport-nio/licenses/netty-buffer-4.1.96.Final.jar.sha1
 create mode 100644 plugins/transport-nio/licenses/netty-buffer-4.1.97.Final.jar.sha1
 delete mode 100644 plugins/transport-nio/licenses/netty-codec-4.1.96.Final.jar.sha1
 create mode 100644 plugins/transport-nio/licenses/netty-codec-4.1.97.Final.jar.sha1
 delete mode 100644 plugins/transport-nio/licenses/netty-codec-http-4.1.96.Final.jar.sha1
 create mode 100644 plugins/transport-nio/licenses/netty-codec-http-4.1.97.Final.jar.sha1
 delete mode 100644 plugins/transport-nio/licenses/netty-common-4.1.96.Final.jar.sha1
 create mode 100644 plugins/transport-nio/licenses/netty-common-4.1.97.Final.jar.sha1
 delete mode 100644 plugins/transport-nio/licenses/netty-handler-4.1.96.Final.jar.sha1
 create mode 100644 plugins/transport-nio/licenses/netty-handler-4.1.97.Final.jar.sha1
 delete mode 100644 plugins/transport-nio/licenses/netty-resolver-4.1.96.Final.jar.sha1
 create mode 100644 plugins/transport-nio/licenses/netty-resolver-4.1.97.Final.jar.sha1
 delete mode 100644 plugins/transport-nio/licenses/netty-transport-4.1.96.Final.jar.sha1
 create mode 100644 plugins/transport-nio/licenses/netty-transport-4.1.97.Final.jar.sha1

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 555d51cb1d066..96af8741a0de9 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -121,6 +121,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Bump `com.github.luben:zstd-jni` from 1.5.5-3 to 1.5.5-5 ([#9431](https://github.com/opensearch-project/OpenSearch/pull/9431)
 - Bump `actions/setup-java` from 2 to 3 ([#9457](https://github.com/opensearch-project/OpenSearch/pull/9457))
 - Bump `com.google.api:gax` from 2.27.0 to 2.32.0 ([#9300](https://github.com/opensearch-project/OpenSearch/pull/9300))
+- Bump `netty` from 4.1.96.Final to 4.1.97.Final ([#9553](https://github.com/opensearch-project/OpenSearch/pull/9553))
 
 ### Changed
 - Default to mmapfs within hybridfs ([#8508](https://github.com/opensearch-project/OpenSearch/pull/8508))
diff --git a/buildSrc/version.properties b/buildSrc/version.properties
index a0804687732dc..0d84ddebb905c 100644
--- a/buildSrc/version.properties
+++ b/buildSrc/version.properties
@@ -28,7 +28,7 @@ jakarta_annotation = 1.3.5
 # when updating the JNA version, also update the version in buildSrc/build.gradle
 jna = 5.5.0
 
-netty = 4.1.96.Final
+netty = 4.1.97.Final
 joda = 2.12.2
 
 # client dependencies
diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.96.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.96.Final.jar.sha1
deleted file mode 100644
index 7abdb33dc79a2..0000000000000
--- a/modules/transport-netty4/licenses/netty-buffer-4.1.96.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-4b80fffbe77485b457bf844289bf1801f61b9e91
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.97.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.97.Final.jar.sha1
new file mode 100644
index 0000000000000..8430355365996
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-buffer-4.1.97.Final.jar.sha1
@@ -0,0 +1 @@
+f8f3d8644afa5e6e1a40a3a6aeb9d9aa970ecb4f
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.96.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.96.Final.jar.sha1
deleted file mode 100644
index 8fdb32be1de0b..0000000000000
--- a/modules/transport-netty4/licenses/netty-codec-4.1.96.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-9cfe430f8b14e7ba86969d8e1126aa0aae4d18f0
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.97.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.97.Final.jar.sha1
new file mode 100644
index 0000000000000..7a36dc1f2724f
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-codec-4.1.97.Final.jar.sha1
@@ -0,0 +1 @@
+384ba4d75670befbedb45c4d3b497a93639c206d
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.96.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.96.Final.jar.sha1
deleted file mode 100644
index dfb0cf39463e2..0000000000000
--- a/modules/transport-netty4/licenses/netty-codec-http-4.1.96.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-a4d0d95df5026965c454902ef3d6d84b81f89626
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.97.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.97.Final.jar.sha1
new file mode 100644
index 0000000000000..37b78a32f741f
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-codec-http-4.1.97.Final.jar.sha1
@@ -0,0 +1 @@
+af78acec783ffd77c63d8aeecc21041fd39ac54f
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.96.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.96.Final.jar.sha1
deleted file mode 100644
index 2fc787ee65197..0000000000000
--- a/modules/transport-netty4/licenses/netty-codec-http2-4.1.96.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-cc8baf4ff67c1bcc0cde60bc5c2bb9447d92d9e6
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.97.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.97.Final.jar.sha1
new file mode 100644
index 0000000000000..cbf685a6d79d3
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-codec-http2-4.1.97.Final.jar.sha1
@@ -0,0 +1 @@
+893888d09a7bef0d0ba973d7471943e765d0fd08
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-common-4.1.96.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.96.Final.jar.sha1
deleted file mode 100644
index 85b5f52749671..0000000000000
--- a/modules/transport-netty4/licenses/netty-common-4.1.96.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-d10c167623cbc471753f950846df241d1021655c
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-common-4.1.97.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.97.Final.jar.sha1
new file mode 100644
index 0000000000000..1bdfec3aae6ba
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-common-4.1.97.Final.jar.sha1
@@ -0,0 +1 @@
+7cceacaf11df8dc63f23d0fb58e9d4640fc88404
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.96.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.96.Final.jar.sha1
deleted file mode 100644
index fe4f48c68e78b..0000000000000
--- a/modules/transport-netty4/licenses/netty-handler-4.1.96.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-7840d7523d709e02961b647546f9d9dde1699306
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.97.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.97.Final.jar.sha1
new file mode 100644
index 0000000000000..8b7b50a6fc9c6
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-handler-4.1.97.Final.jar.sha1
@@ -0,0 +1 @@
+abb86c6906bf512bf2b797a41cd7d2e8d3cd7c36
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.96.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.96.Final.jar.sha1
deleted file mode 100644
index 9e93f013226cd..0000000000000
--- a/modules/transport-netty4/licenses/netty-resolver-4.1.96.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-0e51db5568a881e0f9b013b35617c597dc32f130
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.97.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.97.Final.jar.sha1
new file mode 100644
index 0000000000000..032959e98d009
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-resolver-4.1.97.Final.jar.sha1
@@ -0,0 +1 @@
+cec8348108dc76c47cf87c669d514be52c922144
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.96.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.96.Final.jar.sha1
deleted file mode 100644
index 707285d3d29c3..0000000000000
--- a/modules/transport-netty4/licenses/netty-transport-4.1.96.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-dbd15ca244be28e1a98ed29b9d755edbfa737e02
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.97.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.97.Final.jar.sha1
new file mode 100644
index 0000000000000..107863c1b3c9d
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-transport-4.1.97.Final.jar.sha1
@@ -0,0 +1 @@
+f37380d23c9bb079bc702910833b2fd532c9abd0
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.96.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.96.Final.jar.sha1
deleted file mode 100644
index e911c47d5ab1a..0000000000000
--- a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.96.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-daf8578cade63a01525ee9d70371fa78e6e91094
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1
new file mode 100644
index 0000000000000..f736d37d071b7
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1
@@ -0,0 +1 @@
+d469d84265ab70095b01b40886cabdd433b6e664
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.96.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.96.Final.jar.sha1
deleted file mode 100644
index 42d5e60ce9d45..0000000000000
--- a/plugins/repository-azure/licenses/netty-codec-dns-4.1.96.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-afd90dc0e164be74b4a3e1a899890557fce98567
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.97.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.97.Final.jar.sha1
new file mode 100644
index 0000000000000..f592ac8312a5d
--- /dev/null
+++ b/plugins/repository-azure/licenses/netty-codec-dns-4.1.97.Final.jar.sha1
@@ -0,0 +1 @@
+d266d079ef33cf93a16b382d64dd15d562df1159
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.96.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.96.Final.jar.sha1
deleted file mode 100644
index 2fc787ee65197..0000000000000
--- a/plugins/repository-azure/licenses/netty-codec-http2-4.1.96.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-cc8baf4ff67c1bcc0cde60bc5c2bb9447d92d9e6
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.97.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.97.Final.jar.sha1
new file mode 100644
index 0000000000000..cbf685a6d79d3
--- /dev/null
+++ b/plugins/repository-azure/licenses/netty-codec-http2-4.1.97.Final.jar.sha1
@@ -0,0 +1 @@
+893888d09a7bef0d0ba973d7471943e765d0fd08
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.96.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.96.Final.jar.sha1
deleted file mode 100644
index 8e959bdac5079..0000000000000
--- a/plugins/repository-azure/licenses/netty-codec-socks-4.1.96.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-f53c52dbddaa4a02a51430405792d3f30a89b147
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.97.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.97.Final.jar.sha1
new file mode 100644
index 0000000000000..d06147a0ba646
--- /dev/null
+++ b/plugins/repository-azure/licenses/netty-codec-socks-4.1.97.Final.jar.sha1
@@ -0,0 +1 @@
+30e8fa29a349db5a933225d61891b8802836bb79
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.96.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.96.Final.jar.sha1
deleted file mode 100644
index d410208dada90..0000000000000
--- a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.96.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-dcabd63f4aaec2b4cad7588bfdd4cd2c82287e38
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.97.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.97.Final.jar.sha1
new file mode 100644
index 0000000000000..67c3a763d26fa
--- /dev/null
+++ b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.97.Final.jar.sha1
@@ -0,0 +1 @@
+a99ecef0e1d86a92e40a7c89805c236d9cd7493e
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.96.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.96.Final.jar.sha1
deleted file mode 100644
index 5041cf5473505..0000000000000
--- a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.96.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-0095023cc667af76578c9be326a6d54e3e1de52c
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.97.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.97.Final.jar.sha1
new file mode 100644
index 0000000000000..60fd706436ae7
--- /dev/null
+++ b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.97.Final.jar.sha1
@@ -0,0 +1 @@
+2c50f835777ecd4535e15b552b5d9ccb26a2504f
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.96.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.96.Final.jar.sha1
deleted file mode 100644
index e911c47d5ab1a..0000000000000
--- a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.96.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-daf8578cade63a01525ee9d70371fa78e6e91094
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1
new file mode 100644
index 0000000000000..f736d37d071b7
--- /dev/null
+++ b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1
@@ -0,0 +1 @@
+d469d84265ab70095b01b40886cabdd433b6e664
\ No newline at end of file
diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.96.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.96.Final.jar.sha1
deleted file mode 100644
index 32ced5451cfb6..0000000000000
--- a/plugins/repository-hdfs/licenses/netty-all-4.1.96.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-2145ec747511965e4a57099767654cf9083ce8a7
\ No newline at end of file
diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.97.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.97.Final.jar.sha1
new file mode 100644
index 0000000000000..c6fa4cc175222
--- /dev/null
+++ b/plugins/repository-hdfs/licenses/netty-all-4.1.97.Final.jar.sha1
@@ -0,0 +1 @@
+afec3c414a0ab7264a66a7572e9e9d3a19a3e0e5
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.96.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.96.Final.jar.sha1
deleted file mode 100644
index 7abdb33dc79a2..0000000000000
--- a/plugins/repository-s3/licenses/netty-buffer-4.1.96.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-4b80fffbe77485b457bf844289bf1801f61b9e91
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.97.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.97.Final.jar.sha1
new file mode 100644
index 0000000000000..8430355365996
--- /dev/null
+++ b/plugins/repository-s3/licenses/netty-buffer-4.1.97.Final.jar.sha1
@@ -0,0 +1 @@
+f8f3d8644afa5e6e1a40a3a6aeb9d9aa970ecb4f
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.96.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.96.Final.jar.sha1
deleted file mode 100644
index 8fdb32be1de0b..0000000000000
--- a/plugins/repository-s3/licenses/netty-codec-4.1.96.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-9cfe430f8b14e7ba86969d8e1126aa0aae4d18f0
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.97.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.97.Final.jar.sha1
new file mode 100644
index 0000000000000..7a36dc1f2724f
--- /dev/null
+++ b/plugins/repository-s3/licenses/netty-codec-4.1.97.Final.jar.sha1
@@ -0,0 +1 @@
+384ba4d75670befbedb45c4d3b497a93639c206d
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.96.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.96.Final.jar.sha1
deleted file mode 100644
index dfb0cf39463e2..0000000000000
--- a/plugins/repository-s3/licenses/netty-codec-http-4.1.96.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-a4d0d95df5026965c454902ef3d6d84b81f89626
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.97.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.97.Final.jar.sha1
new file mode 100644
index 0000000000000..37b78a32f741f
--- /dev/null
+++ b/plugins/repository-s3/licenses/netty-codec-http-4.1.97.Final.jar.sha1
@@ -0,0 +1 @@
+af78acec783ffd77c63d8aeecc21041fd39ac54f
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.96.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.96.Final.jar.sha1
deleted file mode 100644
index 2fc787ee65197..0000000000000
--- a/plugins/repository-s3/licenses/netty-codec-http2-4.1.96.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-cc8baf4ff67c1bcc0cde60bc5c2bb9447d92d9e6
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.97.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.97.Final.jar.sha1
new file mode 100644
index 0000000000000..cbf685a6d79d3
--- /dev/null
+++ b/plugins/repository-s3/licenses/netty-codec-http2-4.1.97.Final.jar.sha1
@@ -0,0 +1 @@
+893888d09a7bef0d0ba973d7471943e765d0fd08
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-common-4.1.96.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.96.Final.jar.sha1
deleted file mode 100644
index 85b5f52749671..0000000000000
--- a/plugins/repository-s3/licenses/netty-common-4.1.96.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-d10c167623cbc471753f950846df241d1021655c
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-common-4.1.97.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.97.Final.jar.sha1
new file mode 100644
index 0000000000000..1bdfec3aae6ba
--- /dev/null
+++ b/plugins/repository-s3/licenses/netty-common-4.1.97.Final.jar.sha1
@@ -0,0 +1 @@
+7cceacaf11df8dc63f23d0fb58e9d4640fc88404
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.96.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.96.Final.jar.sha1
deleted file mode 100644
index fe4f48c68e78b..0000000000000
--- a/plugins/repository-s3/licenses/netty-handler-4.1.96.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-7840d7523d709e02961b647546f9d9dde1699306
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.97.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.97.Final.jar.sha1
new file mode 100644
index 0000000000000..8b7b50a6fc9c6
--- /dev/null
+++ b/plugins/repository-s3/licenses/netty-handler-4.1.97.Final.jar.sha1
@@ -0,0 +1 @@
+abb86c6906bf512bf2b797a41cd7d2e8d3cd7c36
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.96.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.96.Final.jar.sha1
deleted file mode 100644
index 9e93f013226cd..0000000000000
--- a/plugins/repository-s3/licenses/netty-resolver-4.1.96.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-0e51db5568a881e0f9b013b35617c597dc32f130
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.97.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.97.Final.jar.sha1
new file mode 100644
index 0000000000000..032959e98d009
--- /dev/null
+++ b/plugins/repository-s3/licenses/netty-resolver-4.1.97.Final.jar.sha1
@@ -0,0 +1 @@
+cec8348108dc76c47cf87c669d514be52c922144
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.96.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.96.Final.jar.sha1
deleted file mode 100644
index 707285d3d29c3..0000000000000
--- a/plugins/repository-s3/licenses/netty-transport-4.1.96.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-dbd15ca244be28e1a98ed29b9d755edbfa737e02
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.97.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.97.Final.jar.sha1
new file mode 100644
index 0000000000000..107863c1b3c9d
--- /dev/null
+++ b/plugins/repository-s3/licenses/netty-transport-4.1.97.Final.jar.sha1
@@ -0,0 +1 @@
+f37380d23c9bb079bc702910833b2fd532c9abd0
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.96.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.96.Final.jar.sha1
deleted file mode 100644
index 58564d9da4b27..0000000000000
--- a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.96.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-b0369501645f6e71f89ff7f77b5c5f52510a2e31
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.97.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.97.Final.jar.sha1
new file mode 100644
index 0000000000000..8e40c8826d76d
--- /dev/null
+++ b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.97.Final.jar.sha1
@@ -0,0 +1 @@
+795da37ded759e862457a82d9d92c4d39ce8ecee
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.96.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.96.Final.jar.sha1
deleted file mode 100644
index e911c47d5ab1a..0000000000000
--- a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.96.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-daf8578cade63a01525ee9d70371fa78e6e91094
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1
new file mode 100644
index 0000000000000..f736d37d071b7
--- /dev/null
+++ b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1
@@ -0,0 +1 @@
+d469d84265ab70095b01b40886cabdd433b6e664
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.96.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.96.Final.jar.sha1
deleted file mode 100644
index 7abdb33dc79a2..0000000000000
--- a/plugins/transport-nio/licenses/netty-buffer-4.1.96.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-4b80fffbe77485b457bf844289bf1801f61b9e91
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.97.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.97.Final.jar.sha1
new file mode 100644
index 0000000000000..8430355365996
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-buffer-4.1.97.Final.jar.sha1
@@ -0,0 +1 @@
+f8f3d8644afa5e6e1a40a3a6aeb9d9aa970ecb4f
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.96.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.96.Final.jar.sha1
deleted file mode 100644
index 8fdb32be1de0b..0000000000000
--- a/plugins/transport-nio/licenses/netty-codec-4.1.96.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-9cfe430f8b14e7ba86969d8e1126aa0aae4d18f0
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.97.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.97.Final.jar.sha1
new file mode 100644
index 0000000000000..7a36dc1f2724f
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-codec-4.1.97.Final.jar.sha1
@@ -0,0 +1 @@
+384ba4d75670befbedb45c4d3b497a93639c206d
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.96.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.96.Final.jar.sha1
deleted file mode 100644
index dfb0cf39463e2..0000000000000
--- a/plugins/transport-nio/licenses/netty-codec-http-4.1.96.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-a4d0d95df5026965c454902ef3d6d84b81f89626
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.97.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.97.Final.jar.sha1
new file mode 100644
index 0000000000000..37b78a32f741f
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-codec-http-4.1.97.Final.jar.sha1
@@ -0,0 +1 @@
+af78acec783ffd77c63d8aeecc21041fd39ac54f
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-common-4.1.96.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.96.Final.jar.sha1
deleted file mode 100644
index 85b5f52749671..0000000000000
--- a/plugins/transport-nio/licenses/netty-common-4.1.96.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-d10c167623cbc471753f950846df241d1021655c
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-common-4.1.97.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.97.Final.jar.sha1
new file mode 100644
index 0000000000000..1bdfec3aae6ba
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-common-4.1.97.Final.jar.sha1
@@ -0,0 +1 @@
+7cceacaf11df8dc63f23d0fb58e9d4640fc88404
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.96.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.96.Final.jar.sha1
deleted file mode 100644
index fe4f48c68e78b..0000000000000
--- a/plugins/transport-nio/licenses/netty-handler-4.1.96.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-7840d7523d709e02961b647546f9d9dde1699306
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.97.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.97.Final.jar.sha1
new file mode 100644
index 0000000000000..8b7b50a6fc9c6
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-handler-4.1.97.Final.jar.sha1
@@ -0,0 +1 @@
+abb86c6906bf512bf2b797a41cd7d2e8d3cd7c36
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.96.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.96.Final.jar.sha1
deleted file mode 100644
index 9e93f013226cd..0000000000000
--- a/plugins/transport-nio/licenses/netty-resolver-4.1.96.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-0e51db5568a881e0f9b013b35617c597dc32f130
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.97.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.97.Final.jar.sha1
new file mode 100644
index 0000000000000..032959e98d009
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-resolver-4.1.97.Final.jar.sha1
@@ -0,0 +1 @@
+cec8348108dc76c47cf87c669d514be52c922144
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.96.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.96.Final.jar.sha1
deleted file mode 100644
index 707285d3d29c3..0000000000000
--- a/plugins/transport-nio/licenses/netty-transport-4.1.96.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-dbd15ca244be28e1a98ed29b9d755edbfa737e02
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.97.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.97.Final.jar.sha1
new file mode 100644
index 0000000000000..107863c1b3c9d
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-transport-4.1.97.Final.jar.sha1
@@ -0,0 +1 @@
+f37380d23c9bb079bc702910833b2fd532c9abd0
\ No newline at end of file
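Each `.jar.sha1` license file touched above holds a single line: the lowercase hex SHA-1 digest of the corresponding jar, written without a trailing newline (hence the repeated `\ No newline at end of file` markers). For reviewers who want to spot-check one of the new 4.1.97.Final entries, below is a minimal sketch of such a check — a hypothetical standalone helper, not part of this patch or of the OpenSearch build tooling, and assuming Java 17+ for `HexFormat`:

import java.nio.file.Files;
import java.nio.file.Path;
import java.security.MessageDigest;
import java.util.HexFormat;

// Hypothetical spot-check utility: recomputes a jar's SHA-1 and compares it
// against the checked-in <jar-name>.jar.sha1 license file (one line, lowercase hex).
public class Sha1LicenseCheck {
    public static void main(String[] args) throws Exception {
        Path jar = Path.of(args[0]);                         // e.g. netty-buffer-4.1.97.Final.jar
        Path shaFile = Path.of(args[0] + ".sha1");           // the matching license file
        byte[] digest = MessageDigest.getInstance("SHA-1").digest(Files.readAllBytes(jar));
        String actual = HexFormat.of().formatHex(digest);    // lowercase hex, like the entries above
        String expected = Files.readString(shaFile).trim();  // tolerate the missing trailing newline
        System.out.println(actual.equals(expected) ? "OK" : "MISMATCH: computed " + actual);
    }
}

Running it as `java Sha1LicenseCheck netty-buffer-4.1.97.Final.jar` against the artifact published on Maven Central should print `OK`, matching the `+f8f3d8644afa5e6e1a40a3a6aeb9d9aa970ecb4f` line in the corresponding hunk above.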